Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
43506d95 | 3 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. |
c1a0b23b | 4 | * Copyright (c) 2006 Intel Corporation. All rights reserved. |
1da177e4 LT |
5 | * |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
1da177e4 LT |
33 | */ |
34 | ||
35 | #include <linux/module.h> | |
36 | #include <linux/init.h> | |
37 | #include <linux/err.h> | |
38 | #include <linux/random.h> | |
39 | #include <linux/spinlock.h> | |
40 | #include <linux/slab.h> | |
1da177e4 LT |
41 | #include <linux/dma-mapping.h> |
42 | #include <linux/kref.h> | |
43 | #include <linux/idr.h> | |
4e57b681 | 44 | #include <linux/workqueue.h> |
dd5f03be | 45 | #include <uapi/linux/if_ether.h> |
a4d61e84 | 46 | #include <rdma/ib_pack.h> |
6d969a47 | 47 | #include <rdma/ib_cache.h> |
2ca546b9 KW |
48 | #include <rdma/rdma_netlink.h> |
49 | #include <net/netlink.h> | |
50 | #include <uapi/rdma/ib_user_sa.h> | |
51 | #include <rdma/ib_marshall.h> | |
20029832 | 52 | #include <rdma/ib_addr.h> |
d98bb7f7 | 53 | #include <rdma/opa_addr.h> |
faec2f7b | 54 | #include "sa.h" |
20029832 | 55 | #include "core_priv.h" |
1da177e4 | 56 | |
2ca546b9 KW |
57 | #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100 |
58 | #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000 | |
59 | #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000 | |
ee1c60b1 DC |
60 | #define IB_SA_CPI_MAX_RETRY_CNT 3 |
61 | #define IB_SA_CPI_RETRY_WAIT 1000 /* msecs */ | |
2ca546b9 KW |
62 | static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT; |
63 | ||
1da177e4 LT |
64 | struct ib_sa_sm_ah { |
65 | struct ib_ah *ah; | |
66 | struct kref ref; | |
2aec5c60 | 67 | u16 pkey_index; |
d0e7bb14 | 68 | u8 src_path_mask; |
1da177e4 LT |
69 | }; |
70 | ||
2196f271 DC |
71 | enum rdma_class_port_info_type { |
72 | RDMA_CLASS_PORT_INFO_IB, | |
73 | RDMA_CLASS_PORT_INFO_OPA | |
74 | }; | |
75 | ||
76 | struct rdma_class_port_info { | |
77 | enum rdma_class_port_info_type type; | |
78 | union { | |
79 | struct ib_class_port_info ib; | |
80 | struct opa_class_port_info opa; | |
81 | }; | |
82 | }; | |
83 | ||
3d3fd742 AV |
84 | struct ib_sa_classport_cache { |
85 | bool valid; | |
ee1c60b1 | 86 | int retry_cnt; |
2196f271 | 87 | struct rdma_class_port_info data; |
3d3fd742 AV |
88 | }; |
89 | ||
1da177e4 LT |
90 | struct ib_sa_port { |
91 | struct ib_mad_agent *agent; | |
1da177e4 LT |
92 | struct ib_sa_sm_ah *sm_ah; |
93 | struct work_struct update_task; | |
3d3fd742 | 94 | struct ib_sa_classport_cache classport_info; |
ee1c60b1 | 95 | struct delayed_work ib_cpi_work; |
3d3fd742 | 96 | spinlock_t classport_lock; /* protects class port info set */ |
1da177e4 LT |
97 | spinlock_t ah_lock; |
98 | u8 port_num; | |
99 | }; | |
100 | ||
101 | struct ib_sa_device { | |
102 | int start_port, end_port; | |
103 | struct ib_event_handler event_handler; | |
104 | struct ib_sa_port port[0]; | |
105 | }; | |
106 | ||
107 | struct ib_sa_query { | |
108 | void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); | |
109 | void (*release)(struct ib_sa_query *); | |
c1a0b23b | 110 | struct ib_sa_client *client; |
34816ad9 SH |
111 | struct ib_sa_port *port; |
112 | struct ib_mad_send_buf *mad_buf; | |
113 | struct ib_sa_sm_ah *sm_ah; | |
114 | int id; | |
2ca546b9 KW |
115 | u32 flags; |
116 | struct list_head list; /* Local svc request list */ | |
117 | u32 seq; /* Local svc request sequence number */ | |
118 | unsigned long timeout; /* Local svc timeout */ | |
119 | u8 path_use; /* How will the pathrecord be used */ | |
1da177e4 LT |
120 | }; |
121 | ||
2ca546b9 KW |
122 | #define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001 |
123 | #define IB_SA_CANCEL 0x00000002 | |
2196f271 | 124 | #define IB_SA_QUERY_OPA 0x00000004 |
2ca546b9 | 125 | |
cbae32c5 HR |
126 | struct ib_sa_service_query { |
127 | void (*callback)(int, struct ib_sa_service_rec *, void *); | |
128 | void *context; | |
129 | struct ib_sa_query sa_query; | |
130 | }; | |
131 | ||
1da177e4 | 132 | struct ib_sa_path_query { |
c2f8fc4e | 133 | void (*callback)(int, struct sa_path_rec *, void *); |
1da177e4 LT |
134 | void *context; |
135 | struct ib_sa_query sa_query; | |
4c33bd19 | 136 | struct sa_path_rec *conv_pr; |
1da177e4 LT |
137 | }; |
138 | ||
aeab97ed ES |
139 | struct ib_sa_guidinfo_query { |
140 | void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); | |
141 | void *context; | |
142 | struct ib_sa_query sa_query; | |
143 | }; | |
144 | ||
628e6f75 | 145 | struct ib_sa_classport_info_query { |
ee1c60b1 | 146 | void (*callback)(void *); |
628e6f75 ES |
147 | void *context; |
148 | struct ib_sa_query sa_query; | |
149 | }; | |
150 | ||
1da177e4 LT |
151 | struct ib_sa_mcmember_query { |
152 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); | |
153 | void *context; | |
154 | struct ib_sa_query sa_query; | |
155 | }; | |
156 | ||
2ca546b9 KW |
157 | static LIST_HEAD(ib_nl_request_list); |
158 | static DEFINE_SPINLOCK(ib_nl_request_lock); | |
159 | static atomic_t ib_nl_sa_request_seq; | |
160 | static struct workqueue_struct *ib_nl_wq; | |
161 | static struct delayed_work ib_nl_timed_work; | |
162 | static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = { | |
163 | [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY, | |
164 | .len = sizeof(struct ib_path_rec_data)}, | |
165 | [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32}, | |
166 | [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64}, | |
167 | [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY, | |
168 | .len = sizeof(struct rdma_nla_ls_gid)}, | |
169 | [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY, | |
170 | .len = sizeof(struct rdma_nla_ls_gid)}, | |
171 | [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8}, | |
172 | [LS_NLA_TYPE_PKEY] = {.type = NLA_U16}, | |
173 | [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16}, | |
174 | }; | |
175 | ||
176 | ||
1da177e4 | 177 | static void ib_sa_add_one(struct ib_device *device); |
7c1eb45a | 178 | static void ib_sa_remove_one(struct ib_device *device, void *client_data); |
1da177e4 LT |
179 | |
180 | static struct ib_client sa_client = { | |
181 | .name = "sa", | |
182 | .add = ib_sa_add_one, | |
183 | .remove = ib_sa_remove_one | |
184 | }; | |
185 | ||
6276e08a | 186 | static DEFINE_SPINLOCK(idr_lock); |
1da177e4 LT |
187 | static DEFINE_IDR(query_idr); |
188 | ||
6276e08a | 189 | static DEFINE_SPINLOCK(tid_lock); |
1da177e4 LT |
190 | static u32 tid; |
191 | ||
1da177e4 | 192 | #define PATH_REC_FIELD(field) \ |
c2f8fc4e DC |
193 | .struct_offset_bytes = offsetof(struct sa_path_rec, field), \ |
194 | .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \ | |
1da177e4 LT |
195 | .field_name = "sa_path_rec:" #field |
196 | ||
197 | static const struct ib_field path_rec_table[] = { | |
d3957b86 | 198 | { PATH_REC_FIELD(service_id), |
1da177e4 LT |
199 | .offset_words = 0, |
200 | .offset_bits = 0, | |
733d65fe | 201 | .size_bits = 64 }, |
1da177e4 LT |
202 | { PATH_REC_FIELD(dgid), |
203 | .offset_words = 2, | |
204 | .offset_bits = 0, | |
205 | .size_bits = 128 }, | |
206 | { PATH_REC_FIELD(sgid), | |
207 | .offset_words = 6, | |
208 | .offset_bits = 0, | |
209 | .size_bits = 128 }, | |
9fdca4da | 210 | { PATH_REC_FIELD(ib.dlid), |
1da177e4 LT |
211 | .offset_words = 10, |
212 | .offset_bits = 0, | |
213 | .size_bits = 16 }, | |
9fdca4da | 214 | { PATH_REC_FIELD(ib.slid), |
1da177e4 LT |
215 | .offset_words = 10, |
216 | .offset_bits = 16, | |
217 | .size_bits = 16 }, | |
9fdca4da | 218 | { PATH_REC_FIELD(ib.raw_traffic), |
1da177e4 LT |
219 | .offset_words = 11, |
220 | .offset_bits = 0, | |
221 | .size_bits = 1 }, | |
222 | { RESERVED, | |
223 | .offset_words = 11, | |
224 | .offset_bits = 1, | |
225 | .size_bits = 3 }, | |
226 | { PATH_REC_FIELD(flow_label), | |
227 | .offset_words = 11, | |
228 | .offset_bits = 4, | |
229 | .size_bits = 20 }, | |
230 | { PATH_REC_FIELD(hop_limit), | |
231 | .offset_words = 11, | |
232 | .offset_bits = 24, | |
233 | .size_bits = 8 }, | |
234 | { PATH_REC_FIELD(traffic_class), | |
235 | .offset_words = 12, | |
236 | .offset_bits = 0, | |
237 | .size_bits = 8 }, | |
238 | { PATH_REC_FIELD(reversible), | |
239 | .offset_words = 12, | |
240 | .offset_bits = 8, | |
241 | .size_bits = 1 }, | |
242 | { PATH_REC_FIELD(numb_path), | |
243 | .offset_words = 12, | |
244 | .offset_bits = 9, | |
245 | .size_bits = 7 }, | |
246 | { PATH_REC_FIELD(pkey), | |
247 | .offset_words = 12, | |
248 | .offset_bits = 16, | |
249 | .size_bits = 16 }, | |
733d65fe | 250 | { PATH_REC_FIELD(qos_class), |
1da177e4 LT |
251 | .offset_words = 13, |
252 | .offset_bits = 0, | |
253 | .size_bits = 12 }, | |
254 | { PATH_REC_FIELD(sl), | |
255 | .offset_words = 13, | |
256 | .offset_bits = 12, | |
257 | .size_bits = 4 }, | |
258 | { PATH_REC_FIELD(mtu_selector), | |
259 | .offset_words = 13, | |
260 | .offset_bits = 16, | |
261 | .size_bits = 2 }, | |
262 | { PATH_REC_FIELD(mtu), | |
263 | .offset_words = 13, | |
264 | .offset_bits = 18, | |
265 | .size_bits = 6 }, | |
266 | { PATH_REC_FIELD(rate_selector), | |
267 | .offset_words = 13, | |
268 | .offset_bits = 24, | |
269 | .size_bits = 2 }, | |
270 | { PATH_REC_FIELD(rate), | |
271 | .offset_words = 13, | |
272 | .offset_bits = 26, | |
273 | .size_bits = 6 }, | |
274 | { PATH_REC_FIELD(packet_life_time_selector), | |
275 | .offset_words = 14, | |
276 | .offset_bits = 0, | |
277 | .size_bits = 2 }, | |
278 | { PATH_REC_FIELD(packet_life_time), | |
279 | .offset_words = 14, | |
280 | .offset_bits = 2, | |
281 | .size_bits = 6 }, | |
282 | { PATH_REC_FIELD(preference), | |
283 | .offset_words = 14, | |
284 | .offset_bits = 8, | |
285 | .size_bits = 8 }, | |
286 | { RESERVED, | |
287 | .offset_words = 14, | |
288 | .offset_bits = 16, | |
289 | .size_bits = 48 }, | |
290 | }; | |
291 | ||
4c33bd19 DC |
292 | #define OPA_PATH_REC_FIELD(field) \ |
293 | .struct_offset_bytes = \ | |
294 | offsetof(struct sa_path_rec, field), \ | |
295 | .struct_size_bytes = \ | |
296 | sizeof((struct sa_path_rec *)0)->field, \ | |
297 | .field_name = "sa_path_rec:" #field | |
298 | ||
299 | static const struct ib_field opa_path_rec_table[] = { | |
d3957b86 | 300 | { OPA_PATH_REC_FIELD(service_id), |
4c33bd19 DC |
301 | .offset_words = 0, |
302 | .offset_bits = 0, | |
303 | .size_bits = 64 }, | |
304 | { OPA_PATH_REC_FIELD(dgid), | |
305 | .offset_words = 2, | |
306 | .offset_bits = 0, | |
307 | .size_bits = 128 }, | |
308 | { OPA_PATH_REC_FIELD(sgid), | |
309 | .offset_words = 6, | |
310 | .offset_bits = 0, | |
311 | .size_bits = 128 }, | |
312 | { OPA_PATH_REC_FIELD(opa.dlid), | |
313 | .offset_words = 10, | |
314 | .offset_bits = 0, | |
315 | .size_bits = 32 }, | |
316 | { OPA_PATH_REC_FIELD(opa.slid), | |
317 | .offset_words = 11, | |
318 | .offset_bits = 0, | |
319 | .size_bits = 32 }, | |
320 | { OPA_PATH_REC_FIELD(opa.raw_traffic), | |
321 | .offset_words = 12, | |
322 | .offset_bits = 0, | |
323 | .size_bits = 1 }, | |
324 | { RESERVED, | |
325 | .offset_words = 12, | |
326 | .offset_bits = 1, | |
327 | .size_bits = 3 }, | |
328 | { OPA_PATH_REC_FIELD(flow_label), | |
329 | .offset_words = 12, | |
330 | .offset_bits = 4, | |
331 | .size_bits = 20 }, | |
332 | { OPA_PATH_REC_FIELD(hop_limit), | |
333 | .offset_words = 12, | |
334 | .offset_bits = 24, | |
335 | .size_bits = 8 }, | |
336 | { OPA_PATH_REC_FIELD(traffic_class), | |
337 | .offset_words = 13, | |
338 | .offset_bits = 0, | |
339 | .size_bits = 8 }, | |
340 | { OPA_PATH_REC_FIELD(reversible), | |
341 | .offset_words = 13, | |
342 | .offset_bits = 8, | |
343 | .size_bits = 1 }, | |
344 | { OPA_PATH_REC_FIELD(numb_path), | |
345 | .offset_words = 13, | |
346 | .offset_bits = 9, | |
347 | .size_bits = 7 }, | |
348 | { OPA_PATH_REC_FIELD(pkey), | |
349 | .offset_words = 13, | |
350 | .offset_bits = 16, | |
351 | .size_bits = 16 }, | |
352 | { OPA_PATH_REC_FIELD(opa.l2_8B), | |
353 | .offset_words = 14, | |
354 | .offset_bits = 0, | |
355 | .size_bits = 1 }, | |
356 | { OPA_PATH_REC_FIELD(opa.l2_10B), | |
357 | .offset_words = 14, | |
358 | .offset_bits = 1, | |
359 | .size_bits = 1 }, | |
360 | { OPA_PATH_REC_FIELD(opa.l2_9B), | |
361 | .offset_words = 14, | |
362 | .offset_bits = 2, | |
363 | .size_bits = 1 }, | |
364 | { OPA_PATH_REC_FIELD(opa.l2_16B), | |
365 | .offset_words = 14, | |
366 | .offset_bits = 3, | |
367 | .size_bits = 1 }, | |
368 | { RESERVED, | |
369 | .offset_words = 14, | |
370 | .offset_bits = 4, | |
371 | .size_bits = 2 }, | |
372 | { OPA_PATH_REC_FIELD(opa.qos_type), | |
373 | .offset_words = 14, | |
374 | .offset_bits = 6, | |
375 | .size_bits = 2 }, | |
376 | { OPA_PATH_REC_FIELD(opa.qos_priority), | |
377 | .offset_words = 14, | |
378 | .offset_bits = 8, | |
379 | .size_bits = 8 }, | |
380 | { RESERVED, | |
381 | .offset_words = 14, | |
382 | .offset_bits = 16, | |
383 | .size_bits = 3 }, | |
384 | { OPA_PATH_REC_FIELD(sl), | |
385 | .offset_words = 14, | |
386 | .offset_bits = 19, | |
387 | .size_bits = 5 }, | |
388 | { RESERVED, | |
389 | .offset_words = 14, | |
390 | .offset_bits = 24, | |
391 | .size_bits = 8 }, | |
392 | { OPA_PATH_REC_FIELD(mtu_selector), | |
393 | .offset_words = 15, | |
394 | .offset_bits = 0, | |
395 | .size_bits = 2 }, | |
396 | { OPA_PATH_REC_FIELD(mtu), | |
397 | .offset_words = 15, | |
398 | .offset_bits = 2, | |
399 | .size_bits = 6 }, | |
400 | { OPA_PATH_REC_FIELD(rate_selector), | |
401 | .offset_words = 15, | |
402 | .offset_bits = 8, | |
403 | .size_bits = 2 }, | |
404 | { OPA_PATH_REC_FIELD(rate), | |
405 | .offset_words = 15, | |
406 | .offset_bits = 10, | |
407 | .size_bits = 6 }, | |
408 | { OPA_PATH_REC_FIELD(packet_life_time_selector), | |
409 | .offset_words = 15, | |
410 | .offset_bits = 16, | |
411 | .size_bits = 2 }, | |
412 | { OPA_PATH_REC_FIELD(packet_life_time), | |
413 | .offset_words = 15, | |
414 | .offset_bits = 18, | |
415 | .size_bits = 6 }, | |
416 | { OPA_PATH_REC_FIELD(preference), | |
417 | .offset_words = 15, | |
418 | .offset_bits = 24, | |
419 | .size_bits = 8 }, | |
420 | }; | |
421 | ||
1da177e4 LT |
422 | #define MCMEMBER_REC_FIELD(field) \ |
423 | .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \ | |
424 | .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \ | |
425 | .field_name = "sa_mcmember_rec:" #field | |
426 | ||
427 | static const struct ib_field mcmember_rec_table[] = { | |
428 | { MCMEMBER_REC_FIELD(mgid), | |
429 | .offset_words = 0, | |
430 | .offset_bits = 0, | |
431 | .size_bits = 128 }, | |
432 | { MCMEMBER_REC_FIELD(port_gid), | |
433 | .offset_words = 4, | |
434 | .offset_bits = 0, | |
435 | .size_bits = 128 }, | |
436 | { MCMEMBER_REC_FIELD(qkey), | |
437 | .offset_words = 8, | |
438 | .offset_bits = 0, | |
439 | .size_bits = 32 }, | |
440 | { MCMEMBER_REC_FIELD(mlid), | |
441 | .offset_words = 9, | |
442 | .offset_bits = 0, | |
443 | .size_bits = 16 }, | |
444 | { MCMEMBER_REC_FIELD(mtu_selector), | |
445 | .offset_words = 9, | |
446 | .offset_bits = 16, | |
447 | .size_bits = 2 }, | |
448 | { MCMEMBER_REC_FIELD(mtu), | |
449 | .offset_words = 9, | |
450 | .offset_bits = 18, | |
451 | .size_bits = 6 }, | |
452 | { MCMEMBER_REC_FIELD(traffic_class), | |
453 | .offset_words = 9, | |
454 | .offset_bits = 24, | |
455 | .size_bits = 8 }, | |
456 | { MCMEMBER_REC_FIELD(pkey), | |
457 | .offset_words = 10, | |
458 | .offset_bits = 0, | |
459 | .size_bits = 16 }, | |
460 | { MCMEMBER_REC_FIELD(rate_selector), | |
461 | .offset_words = 10, | |
462 | .offset_bits = 16, | |
463 | .size_bits = 2 }, | |
464 | { MCMEMBER_REC_FIELD(rate), | |
465 | .offset_words = 10, | |
466 | .offset_bits = 18, | |
467 | .size_bits = 6 }, | |
468 | { MCMEMBER_REC_FIELD(packet_life_time_selector), | |
469 | .offset_words = 10, | |
470 | .offset_bits = 24, | |
471 | .size_bits = 2 }, | |
472 | { MCMEMBER_REC_FIELD(packet_life_time), | |
473 | .offset_words = 10, | |
474 | .offset_bits = 26, | |
475 | .size_bits = 6 }, | |
476 | { MCMEMBER_REC_FIELD(sl), | |
477 | .offset_words = 11, | |
478 | .offset_bits = 0, | |
479 | .size_bits = 4 }, | |
480 | { MCMEMBER_REC_FIELD(flow_label), | |
481 | .offset_words = 11, | |
482 | .offset_bits = 4, | |
483 | .size_bits = 20 }, | |
484 | { MCMEMBER_REC_FIELD(hop_limit), | |
485 | .offset_words = 11, | |
486 | .offset_bits = 24, | |
487 | .size_bits = 8 }, | |
488 | { MCMEMBER_REC_FIELD(scope), | |
489 | .offset_words = 12, | |
490 | .offset_bits = 0, | |
491 | .size_bits = 4 }, | |
492 | { MCMEMBER_REC_FIELD(join_state), | |
493 | .offset_words = 12, | |
494 | .offset_bits = 4, | |
495 | .size_bits = 4 }, | |
496 | { MCMEMBER_REC_FIELD(proxy_join), | |
497 | .offset_words = 12, | |
498 | .offset_bits = 8, | |
499 | .size_bits = 1 }, | |
500 | { RESERVED, | |
501 | .offset_words = 12, | |
502 | .offset_bits = 9, | |
503 | .size_bits = 23 }, | |
504 | }; | |
505 | ||
cbae32c5 HR |
506 | #define SERVICE_REC_FIELD(field) \ |
507 | .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \ | |
508 | .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \ | |
509 | .field_name = "sa_service_rec:" #field | |
510 | ||
511 | static const struct ib_field service_rec_table[] = { | |
512 | { SERVICE_REC_FIELD(id), | |
513 | .offset_words = 0, | |
514 | .offset_bits = 0, | |
515 | .size_bits = 64 }, | |
516 | { SERVICE_REC_FIELD(gid), | |
517 | .offset_words = 2, | |
518 | .offset_bits = 0, | |
519 | .size_bits = 128 }, | |
520 | { SERVICE_REC_FIELD(pkey), | |
521 | .offset_words = 6, | |
522 | .offset_bits = 0, | |
523 | .size_bits = 16 }, | |
524 | { SERVICE_REC_FIELD(lease), | |
525 | .offset_words = 7, | |
526 | .offset_bits = 0, | |
527 | .size_bits = 32 }, | |
528 | { SERVICE_REC_FIELD(key), | |
529 | .offset_words = 8, | |
530 | .offset_bits = 0, | |
531 | .size_bits = 128 }, | |
532 | { SERVICE_REC_FIELD(name), | |
533 | .offset_words = 12, | |
534 | .offset_bits = 0, | |
535 | .size_bits = 64*8 }, | |
536 | { SERVICE_REC_FIELD(data8), | |
537 | .offset_words = 28, | |
538 | .offset_bits = 0, | |
539 | .size_bits = 16*8 }, | |
540 | { SERVICE_REC_FIELD(data16), | |
541 | .offset_words = 32, | |
542 | .offset_bits = 0, | |
543 | .size_bits = 8*16 }, | |
544 | { SERVICE_REC_FIELD(data32), | |
545 | .offset_words = 36, | |
546 | .offset_bits = 0, | |
547 | .size_bits = 4*32 }, | |
548 | { SERVICE_REC_FIELD(data64), | |
549 | .offset_words = 40, | |
550 | .offset_bits = 0, | |
551 | .size_bits = 2*64 }, | |
552 | }; | |
553 | ||
628e6f75 ES |
554 | #define CLASSPORTINFO_REC_FIELD(field) \ |
555 | .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \ | |
556 | .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \ | |
557 | .field_name = "ib_class_port_info:" #field | |
558 | ||
2196f271 | 559 | static const struct ib_field ib_classport_info_rec_table[] = { |
628e6f75 ES |
560 | { CLASSPORTINFO_REC_FIELD(base_version), |
561 | .offset_words = 0, | |
562 | .offset_bits = 0, | |
563 | .size_bits = 8 }, | |
564 | { CLASSPORTINFO_REC_FIELD(class_version), | |
565 | .offset_words = 0, | |
566 | .offset_bits = 8, | |
567 | .size_bits = 8 }, | |
568 | { CLASSPORTINFO_REC_FIELD(capability_mask), | |
569 | .offset_words = 0, | |
570 | .offset_bits = 16, | |
571 | .size_bits = 16 }, | |
572 | { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), | |
573 | .offset_words = 1, | |
574 | .offset_bits = 0, | |
575 | .size_bits = 32 }, | |
576 | { CLASSPORTINFO_REC_FIELD(redirect_gid), | |
577 | .offset_words = 2, | |
578 | .offset_bits = 0, | |
579 | .size_bits = 128 }, | |
580 | { CLASSPORTINFO_REC_FIELD(redirect_tcslfl), | |
581 | .offset_words = 6, | |
582 | .offset_bits = 0, | |
583 | .size_bits = 32 }, | |
584 | { CLASSPORTINFO_REC_FIELD(redirect_lid), | |
585 | .offset_words = 7, | |
586 | .offset_bits = 0, | |
587 | .size_bits = 16 }, | |
588 | { CLASSPORTINFO_REC_FIELD(redirect_pkey), | |
589 | .offset_words = 7, | |
590 | .offset_bits = 16, | |
591 | .size_bits = 16 }, | |
592 | ||
593 | { CLASSPORTINFO_REC_FIELD(redirect_qp), | |
594 | .offset_words = 8, | |
595 | .offset_bits = 0, | |
596 | .size_bits = 32 }, | |
597 | { CLASSPORTINFO_REC_FIELD(redirect_qkey), | |
598 | .offset_words = 9, | |
599 | .offset_bits = 0, | |
600 | .size_bits = 32 }, | |
601 | ||
602 | { CLASSPORTINFO_REC_FIELD(trap_gid), | |
603 | .offset_words = 10, | |
604 | .offset_bits = 0, | |
605 | .size_bits = 128 }, | |
606 | { CLASSPORTINFO_REC_FIELD(trap_tcslfl), | |
607 | .offset_words = 14, | |
608 | .offset_bits = 0, | |
609 | .size_bits = 32 }, | |
610 | ||
611 | { CLASSPORTINFO_REC_FIELD(trap_lid), | |
612 | .offset_words = 15, | |
613 | .offset_bits = 0, | |
614 | .size_bits = 16 }, | |
615 | { CLASSPORTINFO_REC_FIELD(trap_pkey), | |
616 | .offset_words = 15, | |
617 | .offset_bits = 16, | |
618 | .size_bits = 16 }, | |
619 | ||
620 | { CLASSPORTINFO_REC_FIELD(trap_hlqp), | |
621 | .offset_words = 16, | |
622 | .offset_bits = 0, | |
623 | .size_bits = 32 }, | |
624 | { CLASSPORTINFO_REC_FIELD(trap_qkey), | |
625 | .offset_words = 17, | |
626 | .offset_bits = 0, | |
627 | .size_bits = 32 }, | |
628 | }; | |
629 | ||
2196f271 DC |
630 | #define OPA_CLASSPORTINFO_REC_FIELD(field) \ |
631 | .struct_offset_bytes =\ | |
632 | offsetof(struct opa_class_port_info, field), \ | |
633 | .struct_size_bytes = \ | |
634 | sizeof((struct opa_class_port_info *)0)->field, \ | |
635 | .field_name = "opa_class_port_info:" #field | |
636 | ||
637 | static const struct ib_field opa_classport_info_rec_table[] = { | |
638 | { OPA_CLASSPORTINFO_REC_FIELD(base_version), | |
639 | .offset_words = 0, | |
640 | .offset_bits = 0, | |
641 | .size_bits = 8 }, | |
642 | { OPA_CLASSPORTINFO_REC_FIELD(class_version), | |
643 | .offset_words = 0, | |
644 | .offset_bits = 8, | |
645 | .size_bits = 8 }, | |
646 | { OPA_CLASSPORTINFO_REC_FIELD(cap_mask), | |
647 | .offset_words = 0, | |
648 | .offset_bits = 16, | |
649 | .size_bits = 16 }, | |
650 | { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time), | |
651 | .offset_words = 1, | |
652 | .offset_bits = 0, | |
653 | .size_bits = 32 }, | |
654 | { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid), | |
655 | .offset_words = 2, | |
656 | .offset_bits = 0, | |
657 | .size_bits = 128 }, | |
658 | { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl), | |
659 | .offset_words = 6, | |
660 | .offset_bits = 0, | |
661 | .size_bits = 32 }, | |
662 | { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid), | |
663 | .offset_words = 7, | |
664 | .offset_bits = 0, | |
665 | .size_bits = 32 }, | |
666 | { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp), | |
667 | .offset_words = 8, | |
668 | .offset_bits = 0, | |
669 | .size_bits = 32 }, | |
670 | { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey), | |
671 | .offset_words = 9, | |
672 | .offset_bits = 0, | |
673 | .size_bits = 32 }, | |
674 | { OPA_CLASSPORTINFO_REC_FIELD(trap_gid), | |
675 | .offset_words = 10, | |
676 | .offset_bits = 0, | |
677 | .size_bits = 128 }, | |
678 | { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl), | |
679 | .offset_words = 14, | |
680 | .offset_bits = 0, | |
681 | .size_bits = 32 }, | |
682 | { OPA_CLASSPORTINFO_REC_FIELD(trap_lid), | |
683 | .offset_words = 15, | |
684 | .offset_bits = 0, | |
685 | .size_bits = 32 }, | |
686 | { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp), | |
687 | .offset_words = 16, | |
688 | .offset_bits = 0, | |
689 | .size_bits = 32 }, | |
690 | { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey), | |
691 | .offset_words = 17, | |
692 | .offset_bits = 0, | |
693 | .size_bits = 32 }, | |
694 | { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey), | |
695 | .offset_words = 18, | |
696 | .offset_bits = 0, | |
697 | .size_bits = 16 }, | |
698 | { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey), | |
699 | .offset_words = 18, | |
700 | .offset_bits = 16, | |
701 | .size_bits = 16 }, | |
702 | { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd), | |
703 | .offset_words = 19, | |
704 | .offset_bits = 0, | |
705 | .size_bits = 8 }, | |
706 | { RESERVED, | |
707 | .offset_words = 19, | |
708 | .offset_bits = 8, | |
709 | .size_bits = 24 }, | |
710 | }; | |
711 | ||
aeab97ed ES |
712 | #define GUIDINFO_REC_FIELD(field) \ |
713 | .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ | |
714 | .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ | |
715 | .field_name = "sa_guidinfo_rec:" #field | |
716 | ||
717 | static const struct ib_field guidinfo_rec_table[] = { | |
718 | { GUIDINFO_REC_FIELD(lid), | |
719 | .offset_words = 0, | |
720 | .offset_bits = 0, | |
721 | .size_bits = 16 }, | |
722 | { GUIDINFO_REC_FIELD(block_num), | |
723 | .offset_words = 0, | |
724 | .offset_bits = 16, | |
725 | .size_bits = 8 }, | |
726 | { GUIDINFO_REC_FIELD(res1), | |
727 | .offset_words = 0, | |
728 | .offset_bits = 24, | |
729 | .size_bits = 8 }, | |
730 | { GUIDINFO_REC_FIELD(res2), | |
731 | .offset_words = 1, | |
732 | .offset_bits = 0, | |
733 | .size_bits = 32 }, | |
734 | { GUIDINFO_REC_FIELD(guid_info_list), | |
735 | .offset_words = 2, | |
736 | .offset_bits = 0, | |
737 | .size_bits = 512 }, | |
738 | }; | |
739 | ||
2ca546b9 KW |
740 | static inline void ib_sa_disable_local_svc(struct ib_sa_query *query) |
741 | { | |
742 | query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE; | |
743 | } | |
744 | ||
745 | static inline int ib_sa_query_cancelled(struct ib_sa_query *query) | |
746 | { | |
747 | return (query->flags & IB_SA_CANCEL); | |
748 | } | |
749 | ||
750 | static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, | |
751 | struct ib_sa_query *query) | |
752 | { | |
c2f8fc4e | 753 | struct sa_path_rec *sa_rec = query->mad_buf->context[1]; |
2ca546b9 KW |
754 | struct ib_sa_mad *mad = query->mad_buf->mad; |
755 | ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask; | |
756 | u16 val16; | |
757 | u64 val64; | |
758 | struct rdma_ls_resolve_header *header; | |
759 | ||
760 | query->mad_buf->context[1] = NULL; | |
761 | ||
762 | /* Construct the family header first */ | |
4df864c1 | 763 | header = skb_put(skb, NLMSG_ALIGN(sizeof(*header))); |
2ca546b9 KW |
764 | memcpy(header->device_name, query->port->agent->device->name, |
765 | LS_DEVICE_NAME_MAX); | |
766 | header->port_num = query->port->port_num; | |
767 | ||
768 | if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) && | |
769 | sa_rec->reversible != 0) | |
770 | query->path_use = LS_RESOLVE_PATH_USE_GMP; | |
771 | else | |
772 | query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL; | |
773 | header->path_use = query->path_use; | |
774 | ||
775 | /* Now build the attributes */ | |
776 | if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { | |
d3957b86 | 777 | val64 = be64_to_cpu(sa_rec->service_id); |
2ca546b9 KW |
778 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, |
779 | sizeof(val64), &val64); | |
780 | } | |
781 | if (comp_mask & IB_SA_PATH_REC_DGID) | |
782 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID, | |
783 | sizeof(sa_rec->dgid), &sa_rec->dgid); | |
784 | if (comp_mask & IB_SA_PATH_REC_SGID) | |
785 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID, | |
786 | sizeof(sa_rec->sgid), &sa_rec->sgid); | |
787 | if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) | |
788 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS, | |
789 | sizeof(sa_rec->traffic_class), &sa_rec->traffic_class); | |
790 | ||
791 | if (comp_mask & IB_SA_PATH_REC_PKEY) { | |
792 | val16 = be16_to_cpu(sa_rec->pkey); | |
793 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY, | |
794 | sizeof(val16), &val16); | |
795 | } | |
796 | if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) { | |
797 | val16 = be16_to_cpu(sa_rec->qos_class); | |
798 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS, | |
799 | sizeof(val16), &val16); | |
800 | } | |
801 | } | |
802 | ||
803 | static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) | |
804 | { | |
805 | int len = 0; | |
806 | ||
807 | if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) | |
808 | len += nla_total_size(sizeof(u64)); | |
809 | if (comp_mask & IB_SA_PATH_REC_DGID) | |
810 | len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); | |
811 | if (comp_mask & IB_SA_PATH_REC_SGID) | |
812 | len += nla_total_size(sizeof(struct rdma_nla_ls_gid)); | |
813 | if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS) | |
814 | len += nla_total_size(sizeof(u8)); | |
815 | if (comp_mask & IB_SA_PATH_REC_PKEY) | |
816 | len += nla_total_size(sizeof(u16)); | |
817 | if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) | |
818 | len += nla_total_size(sizeof(u16)); | |
819 | ||
820 | /* | |
821 | * Make sure that at least some of the required comp_mask bits are | |
822 | * set. | |
823 | */ | |
824 | if (WARN_ON(len == 0)) | |
825 | return len; | |
826 | ||
827 | /* Add the family header */ | |
828 | len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header)); | |
829 | ||
830 | return len; | |
831 | } | |
832 | ||
3ebd2fd0 | 833 | static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) |
2ca546b9 KW |
834 | { |
835 | struct sk_buff *skb = NULL; | |
836 | struct nlmsghdr *nlh; | |
837 | void *data; | |
838 | int ret = 0; | |
839 | struct ib_sa_mad *mad; | |
840 | int len; | |
841 | ||
842 | mad = query->mad_buf->mad; | |
843 | len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); | |
844 | if (len <= 0) | |
845 | return -EMSGSIZE; | |
846 | ||
3ebd2fd0 | 847 | skb = nlmsg_new(len, gfp_mask); |
2ca546b9 KW |
848 | if (!skb) |
849 | return -ENOMEM; | |
850 | ||
851 | /* Put nlmsg header only for now */ | |
852 | data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS, | |
ba13b5f8 | 853 | RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST); |
2ca546b9 | 854 | if (!data) { |
0f377d86 | 855 | nlmsg_free(skb); |
2ca546b9 KW |
856 | return -EMSGSIZE; |
857 | } | |
858 | ||
859 | /* Add attributes */ | |
860 | ib_nl_set_path_rec_attrs(skb, query); | |
861 | ||
862 | /* Repair the nlmsg header length */ | |
863 | nlmsg_end(skb, nlh); | |
864 | ||
4d7f693a | 865 | ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask); |
2ca546b9 KW |
866 | if (!ret) |
867 | ret = len; | |
868 | else | |
869 | ret = 0; | |
870 | ||
871 | return ret; | |
872 | } | |
873 | ||
3ebd2fd0 | 874 | static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) |
2ca546b9 KW |
875 | { |
876 | unsigned long flags; | |
877 | unsigned long delay; | |
878 | int ret; | |
879 | ||
880 | INIT_LIST_HEAD(&query->list); | |
881 | query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); | |
882 | ||
3ebd2fd0 | 883 | /* Put the request on the list first.*/ |
2ca546b9 | 884 | spin_lock_irqsave(&ib_nl_request_lock, flags); |
2ca546b9 KW |
885 | delay = msecs_to_jiffies(sa_local_svc_timeout_ms); |
886 | query->timeout = delay + jiffies; | |
887 | list_add_tail(&query->list, &ib_nl_request_list); | |
888 | /* Start the timeout if this is the only request */ | |
889 | if (ib_nl_request_list.next == &query->list) | |
890 | queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); | |
2ca546b9 KW |
891 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); |
892 | ||
3ebd2fd0 KW |
893 | ret = ib_nl_send_msg(query, gfp_mask); |
894 | if (ret <= 0) { | |
895 | ret = -EIO; | |
896 | /* Remove the request */ | |
897 | spin_lock_irqsave(&ib_nl_request_lock, flags); | |
898 | list_del(&query->list); | |
899 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
900 | } else { | |
901 | ret = 0; | |
902 | } | |
903 | ||
2ca546b9 KW |
904 | return ret; |
905 | } | |
906 | ||
907 | static int ib_nl_cancel_request(struct ib_sa_query *query) | |
908 | { | |
909 | unsigned long flags; | |
910 | struct ib_sa_query *wait_query; | |
911 | int found = 0; | |
912 | ||
913 | spin_lock_irqsave(&ib_nl_request_lock, flags); | |
914 | list_for_each_entry(wait_query, &ib_nl_request_list, list) { | |
915 | /* Let the timeout take care of the callback */ |
916 | if (query == wait_query) { | |
917 | query->flags |= IB_SA_CANCEL; | |
918 | query->timeout = jiffies; | |
919 | list_move(&query->list, &ib_nl_request_list); | |
920 | found = 1; | |
921 | mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1); | |
922 | break; | |
923 | } | |
924 | } | |
925 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
926 | ||
927 | return found; | |
928 | } | |
929 | ||
930 | static void send_handler(struct ib_mad_agent *agent, | |
931 | struct ib_mad_send_wc *mad_send_wc); | |
932 | ||
933 | static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query, | |
934 | const struct nlmsghdr *nlh) | |
935 | { | |
936 | struct ib_mad_send_wc mad_send_wc; | |
937 | struct ib_sa_mad *mad = NULL; | |
938 | const struct nlattr *head, *curr; | |
939 | struct ib_path_rec_data *rec; | |
940 | int len, rem; | |
941 | u32 mask = 0; | |
942 | int status = -EIO; | |
943 | ||
944 | if (query->callback) { | |
945 | head = (const struct nlattr *) nlmsg_data(nlh); | |
946 | len = nlmsg_len(nlh); | |
947 | switch (query->path_use) { | |
948 | case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL: | |
949 | mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND; | |
950 | break; | |
951 | ||
952 | case LS_RESOLVE_PATH_USE_ALL: | |
953 | case LS_RESOLVE_PATH_USE_GMP: | |
954 | default: | |
955 | mask = IB_PATH_PRIMARY | IB_PATH_GMP | | |
956 | IB_PATH_BIDIRECTIONAL; | |
957 | break; | |
958 | } | |
959 | nla_for_each_attr(curr, head, len, rem) { | |
960 | if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) { | |
961 | rec = nla_data(curr); | |
962 | /* | |
963 | * Get the first one. In the future, we may | |
964 | * need to get up to 6 pathrecords. | |
965 | */ | |
966 | if ((rec->flags & mask) == mask) { | |
967 | mad = query->mad_buf->mad; | |
968 | mad->mad_hdr.method |= | |
969 | IB_MGMT_METHOD_RESP; | |
970 | memcpy(mad->data, rec->path_rec, | |
971 | sizeof(rec->path_rec)); | |
972 | status = 0; | |
973 | break; | |
974 | } | |
975 | } | |
976 | } | |
977 | query->callback(query, status, mad); | |
978 | } | |
979 | ||
980 | mad_send_wc.send_buf = query->mad_buf; | |
981 | mad_send_wc.status = IB_WC_SUCCESS; | |
982 | send_handler(query->mad_buf->mad_agent, &mad_send_wc); | |
983 | } | |
984 | ||
985 | static void ib_nl_request_timeout(struct work_struct *work) | |
986 | { | |
987 | unsigned long flags; | |
988 | struct ib_sa_query *query; | |
989 | unsigned long delay; | |
990 | struct ib_mad_send_wc mad_send_wc; | |
991 | int ret; | |
992 | ||
993 | spin_lock_irqsave(&ib_nl_request_lock, flags); | |
994 | while (!list_empty(&ib_nl_request_list)) { | |
995 | query = list_entry(ib_nl_request_list.next, | |
996 | struct ib_sa_query, list); | |
997 | ||
998 | if (time_after(query->timeout, jiffies)) { | |
999 | delay = query->timeout - jiffies; | |
1000 | if ((long)delay <= 0) | |
1001 | delay = 1; | |
1002 | queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); | |
1003 | break; | |
1004 | } | |
1005 | ||
1006 | list_del(&query->list); | |
1007 | ib_sa_disable_local_svc(query); | |
1008 | /* Hold the lock to protect against query cancellation */ | |
1009 | if (ib_sa_query_cancelled(query)) | |
1010 | ret = -1; | |
1011 | else | |
1012 | ret = ib_post_send_mad(query->mad_buf, NULL); | |
1013 | if (ret) { | |
1014 | mad_send_wc.send_buf = query->mad_buf; | |
1015 | mad_send_wc.status = IB_WC_WR_FLUSH_ERR; | |
1016 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
1017 | send_handler(query->port->agent, &mad_send_wc); | |
1018 | spin_lock_irqsave(&ib_nl_request_lock, flags); | |
1019 | } | |
1020 | } | |
1021 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
1022 | } | |
1023 | ||
735c631a | 1024 | int ib_nl_handle_set_timeout(struct sk_buff *skb, |
647c75ac LR |
1025 | struct nlmsghdr *nlh, |
1026 | struct netlink_ext_ack *extack) | |
2ca546b9 | 1027 | { |
2ca546b9 KW |
1028 | int timeout, delta, abs_delta; |
1029 | const struct nlattr *attr; | |
1030 | unsigned long flags; | |
1031 | struct ib_sa_query *query; | |
1032 | long delay = 0; | |
1033 | struct nlattr *tb[LS_NLA_TYPE_MAX]; | |
1034 | int ret; | |
1035 | ||
2deeb477 | 1036 | if (!(nlh->nlmsg_flags & NLM_F_REQUEST) || |
e3a2b93d | 1037 | !(NETLINK_CB(skb).sk)) |
2ca546b9 KW |
1038 | return -EPERM; |
1039 | ||
1040 | ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), | |
fceb6435 | 1041 | nlmsg_len(nlh), ib_nl_policy, NULL); |
2ca546b9 KW |
1042 | attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT]; |
1043 | if (ret || !attr) | |
1044 | goto settimeout_out; | |
1045 | ||
1046 | timeout = *(int *) nla_data(attr); | |
1047 | if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN) | |
1048 | timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN; | |
1049 | if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX) | |
1050 | timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX; | |
1051 | ||
1052 | delta = timeout - sa_local_svc_timeout_ms; | |
1053 | if (delta < 0) | |
1054 | abs_delta = -delta; | |
1055 | else | |
1056 | abs_delta = delta; | |
1057 | ||
1058 | if (delta != 0) { | |
1059 | spin_lock_irqsave(&ib_nl_request_lock, flags); | |
1060 | sa_local_svc_timeout_ms = timeout; | |
1061 | list_for_each_entry(query, &ib_nl_request_list, list) { | |
1062 | if (delta < 0 && abs_delta > query->timeout) | |
1063 | query->timeout = 0; | |
1064 | else | |
1065 | query->timeout += delta; | |
1066 | ||
1067 | /* Get the new delay from the first entry */ | |
1068 | if (!delay) { | |
1069 | delay = query->timeout - jiffies; | |
1070 | if (delay <= 0) | |
1071 | delay = 1; | |
1072 | } | |
1073 | } | |
1074 | if (delay) | |
1075 | mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, | |
1076 | (unsigned long)delay); | |
1077 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
1078 | } | |
1079 | ||
1080 | settimeout_out: | |
1081 | return skb->len; | |
1082 | } | |
1083 | ||
1084 | static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh) | |
1085 | { | |
1086 | struct nlattr *tb[LS_NLA_TYPE_MAX]; | |
1087 | int ret; | |
1088 | ||
1089 | if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR) | |
1090 | return 0; | |
1091 | ||
1092 | ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh), | |
fceb6435 | 1093 | nlmsg_len(nlh), ib_nl_policy, NULL); |
2ca546b9 KW |
1094 | if (ret) |
1095 | return 0; | |
1096 | ||
1097 | return 1; | |
1098 | } | |
1099 | ||
735c631a | 1100 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, |
647c75ac LR |
1101 | struct nlmsghdr *nlh, |
1102 | struct netlink_ext_ack *extack) | |
2ca546b9 | 1103 | { |
2ca546b9 KW |
1104 | unsigned long flags; |
1105 | struct ib_sa_query *query; | |
1106 | struct ib_mad_send_buf *send_buf; | |
1107 | struct ib_mad_send_wc mad_send_wc; | |
1108 | int found = 0; | |
1109 | int ret; | |
1110 | ||
2deeb477 | 1111 | if ((nlh->nlmsg_flags & NLM_F_REQUEST) || |
e3a2b93d | 1112 | !(NETLINK_CB(skb).sk)) |
2ca546b9 KW |
1113 | return -EPERM; |
1114 | ||
1115 | spin_lock_irqsave(&ib_nl_request_lock, flags); | |
1116 | list_for_each_entry(query, &ib_nl_request_list, list) { | |
1117 | /* | |
1118 | * If the query is cancelled, let the timeout routine | |
1119 | * take care of it. | |
1120 | */ | |
1121 | if (nlh->nlmsg_seq == query->seq) { | |
1122 | found = !ib_sa_query_cancelled(query); | |
1123 | if (found) | |
1124 | list_del(&query->list); | |
1125 | break; | |
1126 | } | |
1127 | } | |
1128 | ||
1129 | if (!found) { | |
1130 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
1131 | goto resp_out; | |
1132 | } | |
1133 | ||
1134 | send_buf = query->mad_buf; | |
1135 | ||
1136 | if (!ib_nl_is_good_resolve_resp(nlh)) { | |
1137 | /* if the result is a failure, send out the packet via IB */ | |
1138 | ib_sa_disable_local_svc(query); | |
1139 | ret = ib_post_send_mad(query->mad_buf, NULL); | |
1140 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
1141 | if (ret) { | |
1142 | mad_send_wc.send_buf = send_buf; | |
1143 | mad_send_wc.status = IB_WC_GENERAL_ERR; | |
1144 | send_handler(query->port->agent, &mad_send_wc); | |
1145 | } | |
1146 | } else { | |
1147 | spin_unlock_irqrestore(&ib_nl_request_lock, flags); | |
1148 | ib_nl_process_good_resolve_rsp(query, nlh); | |
1149 | } | |
1150 | ||
1151 | resp_out: | |
1152 | return skb->len; | |
1153 | } | |
1154 | ||
1da177e4 LT |
1155 | static void free_sm_ah(struct kref *kref) |
1156 | { | |
1157 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); | |
1158 | ||
36523159 | 1159 | rdma_destroy_ah(sm_ah->ah); |
1da177e4 LT |
1160 | kfree(sm_ah); |
1161 | } | |
1162 | ||
c1a0b23b MT |
1163 | void ib_sa_register_client(struct ib_sa_client *client) |
1164 | { | |
1165 | atomic_set(&client->users, 1); | |
1166 | init_completion(&client->comp); | |
1167 | } | |
1168 | EXPORT_SYMBOL(ib_sa_register_client); | |
1169 | ||
c1a0b23b MT |
1170 | void ib_sa_unregister_client(struct ib_sa_client *client) |
1171 | { | |
1172 | ib_sa_client_put(client); | |
1173 | wait_for_completion(&client->comp); | |
1174 | } | |
1175 | EXPORT_SYMBOL(ib_sa_unregister_client); | |
1176 | ||
1da177e4 LT |
1177 | /** |
1178 | * ib_sa_cancel_query - try to cancel an SA query | |
1179 | * @id:ID of query to cancel | |
1180 | * @query:query pointer to cancel | |
1181 | * | |
1182 | * Try to cancel an SA query. If the id and query don't match up or | |
1183 | * the query has already completed, nothing is done. Otherwise the | |
1184 | * query is canceled and will complete with a status of -EINTR. | |
1185 | */ | |
1186 | void ib_sa_cancel_query(int id, struct ib_sa_query *query) | |
1187 | { | |
1188 | unsigned long flags; | |
1189 | struct ib_mad_agent *agent; | |
34816ad9 | 1190 | struct ib_mad_send_buf *mad_buf; |
1da177e4 LT |
1191 | |
1192 | spin_lock_irqsave(&idr_lock, flags); | |
1193 | if (idr_find(&query_idr, id) != query) { | |
1194 | spin_unlock_irqrestore(&idr_lock, flags); | |
1195 | return; | |
1196 | } | |
1197 | agent = query->port->agent; | |
34816ad9 | 1198 | mad_buf = query->mad_buf; |
1da177e4 LT |
1199 | spin_unlock_irqrestore(&idr_lock, flags); |
1200 | ||
2ca546b9 KW |
1201 | /* |
1202 | * If the query is still on the netlink request list, schedule | |
1203 | * it to be cancelled by the timeout routine. Otherwise, it has been | |
1204 | * sent to the MAD layer and has to be cancelled from there. | |
1205 | */ | |
1206 | if (!ib_nl_cancel_request(query)) | |
1207 | ib_cancel_mad(agent, mad_buf); | |
1da177e4 LT |
1208 | } |
1209 | EXPORT_SYMBOL(ib_sa_cancel_query); | |
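As a point of reference, a minimal caller-side sketch (not part of this file) of how the id returned by ib_sa_path_rec_get() and the query handle it fills in are typically fed back into ib_sa_cancel_query(). The `my_path_ctx` structure and `my_cancel_path_query()` helper are hypothetical consumer-side bookkeeping.

```c
/* Hypothetical consumer state saved from an earlier ib_sa_path_rec_get() call */
struct my_path_ctx {
	int query_id;			/* return value of ib_sa_path_rec_get() */
	struct ib_sa_query *query;	/* handle written through the sa_query out-parameter */
};

static void my_cancel_path_query(struct my_path_ctx *ctx)
{
	if (ctx->query) {
		/* If still pending, the query completes its callback with -EINTR */
		ib_sa_cancel_query(ctx->query_id, ctx->query);
		ctx->query = NULL;
	}
}
```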
1210 | ||
d0e7bb14 SH |
1211 | static u8 get_src_path_mask(struct ib_device *device, u8 port_num) |
1212 | { | |
1213 | struct ib_sa_device *sa_dev; | |
1214 | struct ib_sa_port *port; | |
1215 | unsigned long flags; | |
1216 | u8 src_path_mask; | |
1217 | ||
1218 | sa_dev = ib_get_client_data(device, &sa_client); | |
1219 | if (!sa_dev) | |
1220 | return 0x7f; | |
1221 | ||
1222 | port = &sa_dev->port[port_num - sa_dev->start_port]; | |
1223 | spin_lock_irqsave(&port->ah_lock, flags); | |
1224 | src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f; | |
1225 | spin_unlock_irqrestore(&port->ah_lock, flags); | |
1226 | ||
1227 | return src_path_mask; | |
1228 | } | |
1229 | ||
0a514159 PP |
1230 | static int |
1231 | roce_resolve_route_from_path(struct ib_device *device, u8 port_num, | |
39839107 PP |
1232 | struct sa_path_rec *rec, |
1233 | const struct ib_gid_attr *attr) | |
6d969a47 | 1234 | { |
0a514159 | 1235 | struct net_device *resolved_dev; |
0a514159 | 1236 | struct net_device *idev; |
39839107 | 1237 | struct rdma_dev_addr dev_addr = {}; |
0a514159 PP |
1238 | union { |
1239 | struct sockaddr _sockaddr; | |
1240 | struct sockaddr_in _sockaddr_in; | |
1241 | struct sockaddr_in6 _sockaddr_in6; | |
1242 | } sgid_addr, dgid_addr; | |
6d969a47 | 1243 | int ret; |
6d969a47 | 1244 | |
114cc9c4 PP |
1245 | if (rec->roce.route_resolved) |
1246 | return 0; | |
39839107 PP |
1247 | if (!attr || !attr->ndev) |
1248 | return -EINVAL; | |
1249 | ||
1250 | dev_addr.bound_dev_if = attr->ndev->ifindex; | |
1251 | /* TODO: Use net from the ib_gid_attr once it is added to it, | |
1252 | * until then, limit it to init_net. |
1253 | */ | |
1254 | dev_addr.net = &init_net; | |
114cc9c4 | 1255 | |
0a514159 PP |
1256 | if (!device->get_netdev) |
1257 | return -EOPNOTSUPP; | |
6d969a47 | 1258 | |
0a514159 PP |
1259 | rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid); |
1260 | rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid); | |
d98bb7f7 | 1261 | |
0a514159 PP |
1262 | /* validate the route */ |
1263 | ret = rdma_resolve_ip_route(&sgid_addr._sockaddr, | |
1264 | &dgid_addr._sockaddr, &dev_addr); | |
1265 | if (ret) | |
1266 | return ret; | |
1267 | ||
1268 | if ((dev_addr.network == RDMA_NETWORK_IPV4 || | |
1269 | dev_addr.network == RDMA_NETWORK_IPV6) && | |
1270 | rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2) | |
1271 | return -EINVAL; | |
1272 | ||
1273 | idev = device->get_netdev(device, port_num); | |
1274 | if (!idev) | |
1275 | return -ENODEV; | |
1276 | ||
1277 | resolved_dev = dev_get_by_index(dev_addr.net, | |
1278 | dev_addr.bound_dev_if); | |
1279 | if (!resolved_dev) { | |
1280 | ret = -ENODEV; | |
1281 | goto done; | |
1282 | } | |
0a514159 | 1283 | rcu_read_lock(); |
39839107 | 1284 | if (attr->ndev != resolved_dev || |
0a514159 PP |
1285 | (resolved_dev != idev && |
1286 | !rdma_is_upper_dev_rcu(idev, resolved_dev))) | |
1287 | ret = -EHOSTUNREACH; | |
1288 | rcu_read_unlock(); | |
1289 | dev_put(resolved_dev); | |
0a514159 PP |
1290 | done: |
1291 | dev_put(idev); | |
114cc9c4 PP |
1292 | if (!ret) |
1293 | rec->roce.route_resolved = true; | |
0a514159 PP |
1294 | return ret; |
1295 | } | |
d98bb7f7 | 1296 | |
0a514159 PP |
1297 | static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num, |
1298 | struct sa_path_rec *rec, | |
39839107 PP |
1299 | struct rdma_ah_attr *ah_attr, |
1300 | const struct ib_gid_attr *gid_attr) | |
0a514159 PP |
1301 | { |
1302 | enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec); | |
3c86aa70 | 1303 | |
39839107 PP |
1304 | if (!gid_attr) { |
1305 | gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type, | |
1306 | port_num, NULL); | |
1307 | if (IS_ERR(gid_attr)) | |
1308 | return PTR_ERR(gid_attr); | |
1309 | } else | |
1310 | rdma_hold_gid_attr(gid_attr); | |
20029832 | 1311 | |
aa74f487 PP |
1312 | rdma_move_grh_sgid_attr(ah_attr, &rec->dgid, |
1313 | be32_to_cpu(rec->flow_label), | |
1314 | rec->hop_limit, rec->traffic_class, | |
1315 | gid_attr); | |
0a514159 PP |
1316 | return 0; |
1317 | } | |
20029832 | 1318 | |
39839107 PP |
1319 | /** |
1320 | * ib_init_ah_attr_from_path - Initialize address handle attributes based on | |
1321 | * an SA path record. | |
1322 | * @device: Device on which the address handle attributes are initialized. |
1323 | * @port_num: Port on the specified device. | |
1324 | * @rec: path record entry to use for ah attributes initialization. | |
1325 | * @ah_attr: address handle attributes to initialize from the path record. |
1326 | * @gid_attr: SGID attribute to consider during initialization. |
1327 | * |
1328 | * When ib_init_ah_attr_from_path() returns success, |
1329 | * (a) for the IB link layer it optionally holds a reference to the SGID |
1330 | * attribute when a GRH is present. |
1331 | * (b) for the RoCE link layer it holds a reference to the SGID attribute. |
1332 | * The user must invoke rdma_destroy_ah_attr() to release the reference to the |
1333 | * SGID attribute initialized by ib_init_ah_attr_from_path(). |
1334 | */ | |
0a514159 PP |
1335 | int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, |
1336 | struct sa_path_rec *rec, | |
39839107 PP |
1337 | struct rdma_ah_attr *ah_attr, |
1338 | const struct ib_gid_attr *gid_attr) | |
0a514159 PP |
1339 | { |
1340 | int ret = 0; | |
ba36e37f | 1341 | |
0a514159 PP |
1342 | memset(ah_attr, 0, sizeof(*ah_attr)); |
1343 | ah_attr->type = rdma_ah_find_type(device, port_num); | |
1344 | rdma_ah_set_sl(ah_attr, rec->sl); | |
1345 | rdma_ah_set_port_num(ah_attr, port_num); | |
1346 | rdma_ah_set_static_rate(ah_attr, rec->rate); | |
dfa834e1 | 1347 | |
0a514159 | 1348 | if (sa_path_is_roce(rec)) { |
39839107 PP |
1349 | ret = roce_resolve_route_from_path(device, port_num, rec, |
1350 | gid_attr); | |
0a514159 | 1351 | if (ret) |
6d969a47 SH |
1352 | return ret; |
1353 | ||
0a514159 PP |
1354 | memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN); |
1355 | } else { | |
1356 | rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec))); | |
1357 | if (sa_path_is_opa(rec) && | |
1358 | rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) | |
1359 | rdma_ah_set_make_grd(ah_attr, true); | |
1360 | ||
1361 | rdma_ah_set_path_bits(ah_attr, | |
1362 | be32_to_cpu(sa_path_get_slid(rec)) & | |
1363 | get_src_path_mask(device, port_num)); | |
9fdca4da | 1364 | } |
20029832 | 1365 | |
0a514159 | 1366 | if (rec->hop_limit > 0 || sa_path_is_roce(rec)) |
39839107 PP |
1367 | ret = init_ah_attr_grh_fields(device, port_num, |
1368 | rec, ah_attr, gid_attr); | |
0a514159 | 1369 | return ret; |
6d969a47 | 1370 | } |
4ad6a024 | 1371 | EXPORT_SYMBOL(ib_init_ah_attr_from_path); |
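A minimal usage sketch (not part of this file) of the contract described in the kernel-doc above: on success the caller owns a reference to the SGID attribute and must drop it with rdma_destroy_ah_attr(). The `my_fill_ah_attr()` wrapper is hypothetical; passing a NULL gid_attr asks the core to look up the SGID attribute itself.

```c
static int my_fill_ah_attr(struct ib_device *dev, u8 port_num,
			   struct sa_path_rec *rec)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	/* NULL gid_attr: let ib_init_ah_attr_from_path() resolve the SGID itself */
	ret = ib_init_ah_attr_from_path(dev, port_num, rec, &ah_attr, NULL);
	if (ret)
		return ret;

	/* ... use ah_attr here, e.g. to create an address handle ... */

	/* Release the SGID attribute reference taken on success */
	rdma_destroy_ah_attr(&ah_attr);
	return 0;
}
```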
6d969a47 | 1372 | |
2aec5c60 SH |
1373 | static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) |
1374 | { | |
af808ece | 1375 | struct rdma_ah_attr ah_attr; |
2aec5c60 SH |
1376 | unsigned long flags; |
1377 | ||
1378 | spin_lock_irqsave(&query->port->ah_lock, flags); | |
164ba089 MS |
1379 | if (!query->port->sm_ah) { |
1380 | spin_unlock_irqrestore(&query->port->ah_lock, flags); | |
1381 | return -EAGAIN; | |
1382 | } | |
2aec5c60 SH |
1383 | kref_get(&query->port->sm_ah->ref); |
1384 | query->sm_ah = query->port->sm_ah; | |
1385 | spin_unlock_irqrestore(&query->port->ah_lock, flags); | |
1386 | ||
af808ece VSD |
1387 | /* |
1388 |  * Always check if sm_ah has a valid dlid assigned |
1389 |  * before querying for class port info |
1390 | */ | |
1391 | if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) || | |
1392 | !rdma_is_valid_unicast_lid(&ah_attr)) { | |
1393 | kref_put(&query->sm_ah->ref, free_sm_ah); | |
1394 | return -EAGAIN; | |
1395 | } | |
2aec5c60 SH |
1396 | query->mad_buf = ib_create_send_mad(query->port->agent, 1, |
1397 | query->sm_ah->pkey_index, | |
1398 | 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, | |
da2dfaa3 | 1399 | gfp_mask, |
2196f271 DC |
1400 | ((query->flags & IB_SA_QUERY_OPA) ? |
1401 | OPA_MGMT_BASE_VERSION : | |
1402 | IB_MGMT_BASE_VERSION)); | |
3c10c7c9 | 1403 | if (IS_ERR(query->mad_buf)) { |
2aec5c60 SH |
1404 | kref_put(&query->sm_ah->ref, free_sm_ah); |
1405 | return -ENOMEM; | |
1406 | } | |
1407 | ||
1408 | query->mad_buf->ah = query->sm_ah->ah; | |
1409 | ||
1410 | return 0; | |
1411 | } | |
1412 | ||
1413 | static void free_mad(struct ib_sa_query *query) | |
1414 | { | |
1415 | ib_free_send_mad(query->mad_buf); | |
1416 | kref_put(&query->sm_ah->ref, free_sm_ah); | |
1417 | } | |
1418 | ||
2196f271 | 1419 | static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent) |
1da177e4 | 1420 | { |
2196f271 | 1421 | struct ib_sa_mad *mad = query->mad_buf->mad; |
1da177e4 LT |
1422 | unsigned long flags; |
1423 | ||
1424 | memset(mad, 0, sizeof *mad); | |
1425 | ||
2196f271 DC |
1426 | if (query->flags & IB_SA_QUERY_OPA) { |
1427 | mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION; | |
1428 | mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION; | |
1429 | } else { | |
1430 | mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION; | |
1431 | mad->mad_hdr.class_version = IB_SA_CLASS_VERSION; | |
1432 | } | |
1da177e4 | 1433 | mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM; |
1da177e4 LT |
1434 | spin_lock_irqsave(&tid_lock, flags); |
1435 | mad->mad_hdr.tid = | |
1436 | cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++); | |
1437 | spin_unlock_irqrestore(&tid_lock, flags); | |
1438 | } | |
1439 | ||
e322fedf | 1440 | static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) |
1da177e4 | 1441 | { |
d0164adc | 1442 | bool preload = gfpflags_allow_blocking(gfp_mask); |
1da177e4 | 1443 | unsigned long flags; |
34816ad9 | 1444 | int ret, id; |
1da177e4 | 1445 | |
3b069c5d TH |
1446 | if (preload) |
1447 | idr_preload(gfp_mask); | |
1da177e4 | 1448 | spin_lock_irqsave(&idr_lock, flags); |
3b069c5d TH |
1449 | |
1450 | id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT); | |
1451 | ||
1da177e4 | 1452 | spin_unlock_irqrestore(&idr_lock, flags); |
3b069c5d TH |
1453 | if (preload) |
1454 | idr_preload_end(); | |
1455 | if (id < 0) | |
1456 | return id; | |
1da177e4 | 1457 | |
34816ad9 SH |
1458 | query->mad_buf->timeout_ms = timeout_ms; |
1459 | query->mad_buf->context[0] = query; | |
1460 | query->id = id; | |
1da177e4 | 1461 | |
4c33bd19 DC |
1462 | if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) && |
1463 | (!(query->flags & IB_SA_QUERY_OPA))) { | |
ff61c425 | 1464 | if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) { |
3ebd2fd0 | 1465 | if (!ib_nl_make_request(query, gfp_mask)) |
2ca546b9 KW |
1466 | return id; |
1467 | } | |
1468 | ib_sa_disable_local_svc(query); | |
1469 | } | |
1470 | ||
34816ad9 | 1471 | ret = ib_post_send_mad(query->mad_buf, NULL); |
1da177e4 | 1472 | if (ret) { |
1da177e4 | 1473 | spin_lock_irqsave(&idr_lock, flags); |
34816ad9 | 1474 | idr_remove(&query_idr, id); |
1da177e4 LT |
1475 | spin_unlock_irqrestore(&idr_lock, flags); |
1476 | } | |
1477 | ||
dae4c1d2 RD |
1478 | /* |
1479 | * It's not safe to dereference query any more, because the | |
1480 | * send may already have completed and freed the query in | |
34816ad9 | 1481 | * another context. |
dae4c1d2 | 1482 | */ |
34816ad9 | 1483 | return ret ? ret : id; |
1da177e4 LT |
1484 | } |
1485 | ||
c2f8fc4e | 1486 | void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec) |
a7ca1f00 SH |
1487 | { |
1488 | ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec); | |
1489 | } | |
1490 | EXPORT_SYMBOL(ib_sa_unpack_path); | |
1491 | ||
c2f8fc4e | 1492 | void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute) |
2e08b587 SH |
1493 | { |
1494 | ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute); | |
1495 | } | |
1496 | EXPORT_SYMBOL(ib_sa_pack_path); | |
1497 | ||
4c33bd19 DC |
1498 | static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client, |
1499 | struct ib_device *device, | |
1500 | u8 port_num) | |
1501 | { | |
1502 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | |
1503 | struct ib_sa_port *port; | |
1504 | unsigned long flags; | |
1505 | bool ret = false; | |
1506 | ||
1507 | if (!sa_dev) | |
1508 | return ret; | |
1509 | ||
1510 | port = &sa_dev->port[port_num - sa_dev->start_port]; | |
1511 | spin_lock_irqsave(&port->classport_lock, flags); | |
1512 | if (!port->classport_info.valid) | |
1513 | goto ret; | |
1514 | ||
1515 | if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA) | |
1516 | ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) & | |
1517 | OPA_CLASS_PORT_INFO_PR_SUPPORT; | |
1518 | ret: | |
1519 | spin_unlock_irqrestore(&port->classport_lock, flags); | |
1520 | return ret; | |
1521 | } | |
1522 | ||
1523 | enum opa_pr_supported { | |
1524 | PR_NOT_SUPPORTED, | |
1525 | PR_OPA_SUPPORTED, | |
1526 | PR_IB_SUPPORTED | |
1527 | }; | |
1528 | ||
1529 | /** | |
1530 | * Check if current PR query can be an OPA query. | |
1531 |  * Returns PR_NOT_SUPPORTED if a path record query is not |
1532 | * possible, PR_OPA_SUPPORTED if an OPA path record query | |
1533 | * is possible and PR_IB_SUPPORTED if an IB path record | |
1534 | * query is possible. | |
1535 | */ | |
1536 | static int opa_pr_query_possible(struct ib_sa_client *client, | |
1537 | struct ib_device *device, | |
1538 | u8 port_num, | |
1539 | struct sa_path_rec *rec) | |
1540 | { | |
1541 | struct ib_port_attr port_attr; | |
1542 | ||
1543 | if (ib_query_port(device, port_num, &port_attr)) | |
1544 | return PR_NOT_SUPPORTED; | |
1545 | ||
1546 | if (ib_sa_opa_pathrecord_support(client, device, port_num)) | |
1547 | return PR_OPA_SUPPORTED; | |
1548 | ||
1549 | if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) | |
1550 | return PR_NOT_SUPPORTED; | |
1551 | else | |
1552 | return PR_IB_SUPPORTED; | |
1553 | } | |
1554 | ||
1da177e4 LT |
1555 | static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, |
1556 | int status, | |
1557 | struct ib_sa_mad *mad) | |
1558 | { | |
1559 | struct ib_sa_path_query *query = | |
1560 | container_of(sa_query, struct ib_sa_path_query, sa_query); | |
1561 | ||
1562 | if (mad) { | |
c2f8fc4e | 1563 | struct sa_path_rec rec; |
1da177e4 | 1564 | |
4c33bd19 DC |
1565 | if (sa_query->flags & IB_SA_QUERY_OPA) { |
1566 | ib_unpack(opa_path_rec_table, | |
1567 | ARRAY_SIZE(opa_path_rec_table), | |
1568 | mad->data, &rec); | |
1569 | rec.rec_type = SA_PATH_REC_TYPE_OPA; | |
1570 | query->callback(status, &rec, query->context); | |
1571 | } else { | |
1572 | ib_unpack(path_rec_table, | |
1573 | ARRAY_SIZE(path_rec_table), | |
1574 | mad->data, &rec); | |
1575 | rec.rec_type = SA_PATH_REC_TYPE_IB; | |
4c33bd19 DC |
1576 | sa_path_set_dmac_zero(&rec); |
1577 | ||
1578 | if (query->conv_pr) { | |
1579 | struct sa_path_rec opa; | |
1580 | ||
1581 | memset(&opa, 0, sizeof(struct sa_path_rec)); | |
1582 | sa_convert_path_ib_to_opa(&opa, &rec); | |
1583 | query->callback(status, &opa, query->context); | |
1584 | } else { | |
1585 | query->callback(status, &rec, query->context); | |
1586 | } | |
1587 | } | |
1da177e4 LT |
1588 | } else |
1589 | query->callback(status, NULL, query->context); | |
1590 | } | |
1591 | ||
1592 | static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) | |
1593 | { | |
4c33bd19 DC |
1594 | struct ib_sa_path_query *query = |
1595 | container_of(sa_query, struct ib_sa_path_query, sa_query); | |
1596 | ||
1597 | kfree(query->conv_pr); | |
1598 | kfree(query); | |
1da177e4 LT |
1599 | } |
1600 | ||
1601 | /** | |
1602 | * ib_sa_path_rec_get - Start a Path get query | |
c1a0b23b | 1603 | * @client:SA client |
1da177e4 LT |
1604 | * @device:device to send query on |
1605 | * @port_num: port number to send query on | |
1606 | * @rec:Path Record to send in query | |
1607 | * @comp_mask:component mask to send in query | |
1608 | * @timeout_ms:time to wait for response | |
1609 | * @gfp_mask:GFP mask to use for internal allocations | |
1610 | * @callback:function called when query completes, times out or is | |
1611 | * canceled | |
1612 | * @context:opaque user context passed to callback | |
1613 | * @sa_query:query context, used to cancel query | |
1614 | * | |
1615 | * Send a Path Record Get query to the SA to look up a path. The | |
1616 | * callback function will be called when the query completes (or | |
1617 | * fails); status is 0 for a successful response, -EINTR if the query | |
1618 | * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error |
1619 | * occurred sending the query. The resp parameter of the callback is | |
1620 | * only valid if status is 0. | |
1621 | * | |
1622 | * If the return value of ib_sa_path_rec_get() is negative, it is an | |
1623 | * error code. Otherwise it is a query ID that can be used to cancel | |
1624 | * the query. | |
1625 | */ | |
c1a0b23b MT |
1626 | int ib_sa_path_rec_get(struct ib_sa_client *client, |
1627 | struct ib_device *device, u8 port_num, | |
c2f8fc4e | 1628 | struct sa_path_rec *rec, |
1da177e4 | 1629 | ib_sa_comp_mask comp_mask, |
dd0fc66f | 1630 | int timeout_ms, gfp_t gfp_mask, |
1da177e4 | 1631 | void (*callback)(int status, |
c2f8fc4e | 1632 | struct sa_path_rec *resp, |
1da177e4 LT |
1633 | void *context), |
1634 | void *context, | |
1635 | struct ib_sa_query **sa_query) | |
1636 | { | |
1637 | struct ib_sa_path_query *query; | |
1638 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | |
56c202d6 RD |
1639 | struct ib_sa_port *port; |
1640 | struct ib_mad_agent *agent; | |
34816ad9 | 1641 | struct ib_sa_mad *mad; |
4c33bd19 | 1642 | enum opa_pr_supported status; |
1da177e4 LT |
1643 | int ret; |
1644 | ||
56c202d6 RD |
1645 | if (!sa_dev) |
1646 | return -ENODEV; | |
1647 | ||
4c33bd19 DC |
1648 | if ((rec->rec_type != SA_PATH_REC_TYPE_IB) && |
1649 | (rec->rec_type != SA_PATH_REC_TYPE_OPA)) | |
dfa834e1 DC |
1650 | return -EINVAL; |
1651 | ||
56c202d6 RD |
1652 | port = &sa_dev->port[port_num - sa_dev->start_port]; |
1653 | agent = port->agent; | |
1654 | ||
5d265770 | 1655 | query = kzalloc(sizeof(*query), gfp_mask); |
1da177e4 LT |
1656 | if (!query) |
1657 | return -ENOMEM; | |
34816ad9 | 1658 | |
2aec5c60 | 1659 | query->sa_query.port = port; |
4c33bd19 DC |
1660 | if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { |
1661 | status = opa_pr_query_possible(client, device, port_num, rec); | |
1662 | if (status == PR_NOT_SUPPORTED) { | |
1663 | ret = -EINVAL; | |
1664 | goto err1; | |
1665 | } else if (status == PR_OPA_SUPPORTED) { | |
1666 | query->sa_query.flags |= IB_SA_QUERY_OPA; | |
1667 | } else { | |
1668 | query->conv_pr = | |
1669 | kmalloc(sizeof(*query->conv_pr), gfp_mask); | |
1670 | if (!query->conv_pr) { | |
1671 | ret = -ENOMEM; | |
1672 | goto err1; | |
1673 | } | |
1674 | } | |
1675 | } | |
1676 | ||
2aec5c60 SH |
1677 | ret = alloc_mad(&query->sa_query, gfp_mask); |
1678 | if (ret) | |
4c33bd19 | 1679 | goto err2; |
1da177e4 | 1680 | |
c1a0b23b MT |
1681 | ib_sa_client_get(client); |
1682 | query->sa_query.client = client; | |
1683 | query->callback = callback; | |
1684 | query->context = context; | |
1da177e4 | 1685 | |
34816ad9 | 1686 | mad = query->sa_query.mad_buf->mad; |
2196f271 | 1687 | init_mad(&query->sa_query, agent); |
1da177e4 | 1688 | |
34816ad9 SH |
1689 | query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; |
1690 | query->sa_query.release = ib_sa_path_rec_release; | |
34816ad9 SH |
1691 | mad->mad_hdr.method = IB_MGMT_METHOD_GET; |
1692 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); | |
1693 | mad->sa_hdr.comp_mask = comp_mask; | |
1da177e4 | 1694 | |
4c33bd19 DC |
1695 | if (query->sa_query.flags & IB_SA_QUERY_OPA) { |
1696 | ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table), | |
1697 | rec, mad->data); | |
1698 | } else if (query->conv_pr) { | |
1699 | sa_convert_path_opa_to_ib(query->conv_pr, rec); | |
1700 | ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), | |
1701 | query->conv_pr, mad->data); | |
1702 | } else { | |
1703 | ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), | |
1704 | rec, mad->data); | |
1705 | } | |
1da177e4 LT |
1706 | |
1707 | *sa_query = &query->sa_query; | |
dae4c1d2 | 1708 | |
2ca546b9 | 1709 | query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE; |
4c33bd19 DC |
1710 | query->sa_query.mad_buf->context[1] = (query->conv_pr) ? |
1711 | query->conv_pr : rec; | |
2ca546b9 | 1712 | |
e322fedf | 1713 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); |
34816ad9 | 1714 | if (ret < 0) |
4c33bd19 | 1715 | goto err3; |
34816ad9 SH |
1716 | |
1717 | return ret; | |
1718 | ||
4c33bd19 | 1719 | err3: |
34816ad9 | 1720 | *sa_query = NULL; |
c1a0b23b | 1721 | ib_sa_client_put(query->sa_query.client); |
2aec5c60 | 1722 | free_mad(&query->sa_query); |
4c33bd19 DC |
1723 | err2: |
1724 | kfree(query->conv_pr); | |
34816ad9 SH |
1725 | err1: |
1726 | kfree(query); | |
dae4c1d2 | 1727 | return ret; |
1da177e4 LT |
1728 | } |
1729 | EXPORT_SYMBOL(ib_sa_path_rec_get); | |
1730 | ||
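A minimal caller sketch for ib_sa_path_rec_get() (not part of this file): the demo_* names are hypothetical, and the ib_sa_register_client()/ib_sa_unregister_client() helpers plus the IB_SA_PATH_REC_* component-mask bits are assumed to come from <rdma/ib_sa.h>.

#include <linux/completion.h>
#include <rdma/ib_sa.h>

/* Hedged usage sketch: issue one IB PathRecord GET and wait for the
 * asynchronous callback.  The SA core enforces timeout_ms itself and
 * reports -ETIMEDOUT/-EINTR/-EIO through the callback, so the caller
 * only has to wait for the completion. */
struct demo_path_ctx {
	struct completion done;
	int status;
};

static struct ib_sa_client demo_sa_client;

static void demo_path_done(int status, struct sa_path_rec *resp, void *context)
{
	struct demo_path_ctx *ctx = context;

	/* resp is valid only when status == 0, per the kernel-doc above */
	ctx->status = status;
	complete(&ctx->done);
}

static int demo_lookup_path(struct ib_device *dev, u8 port_num,
			    union ib_gid *sgid, union ib_gid *dgid)
{
	struct sa_path_rec rec = {};
	struct ib_sa_query *query;
	struct demo_path_ctx ctx;
	int id;

	init_completion(&ctx.done);
	rec.rec_type = SA_PATH_REC_TYPE_IB;
	rec.sgid = *sgid;
	rec.dgid = *dgid;
	rec.numb_path = 1;

	ib_sa_register_client(&demo_sa_client);
	id = ib_sa_path_rec_get(&demo_sa_client, dev, port_num, &rec,
				IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
				IB_SA_PATH_REC_NUMB_PATH,
				2000, GFP_KERNEL, demo_path_done, &ctx, &query);
	if (id < 0) {
		ib_sa_unregister_client(&demo_sa_client);
		return id;		/* negative return is an error code */
	}

	/* the positive id could instead be handed to ib_sa_cancel_query() */
	wait_for_completion(&ctx.done);
	ib_sa_unregister_client(&demo_sa_client);
	return ctx.status;
}

The query ID returned on success is what ib_sa_cancel_query() expects, which is why callers typically keep both the ID and the ib_sa_query pointer until the callback has run.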
cbae32c5 HR |
1731 | static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, |
1732 | int status, | |
1733 | struct ib_sa_mad *mad) | |
1734 | { | |
1735 | struct ib_sa_service_query *query = | |
1736 | container_of(sa_query, struct ib_sa_service_query, sa_query); | |
1737 | ||
1738 | if (mad) { | |
1739 | struct ib_sa_service_rec rec; | |
1740 | ||
1741 | ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), | |
1742 | mad->data, &rec); | |
1743 | query->callback(status, &rec, query->context); | |
1744 | } else | |
1745 | query->callback(status, NULL, query->context); | |
1746 | } | |
1747 | ||
1748 | static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) | |
1749 | { | |
cbae32c5 HR |
1750 | kfree(container_of(sa_query, struct ib_sa_service_query, sa_query)); |
1751 | } | |
1752 | ||
1753 | /** | |
1754 | * ib_sa_service_rec_query - Start Service Record operation | |
c1a0b23b | 1755 | * @client:SA client |
cbae32c5 HR |
1756 | * @device:device to send request on |
1757 | * @port_num: port number to send request on | |
1758 | * @method:SA method - should be get, set, or delete | |
1759 | * @rec:Service Record to send in request | |
1760 | * @comp_mask:component mask to send in request | |
1761 | * @timeout_ms:time to wait for response | |
1762 | * @gfp_mask:GFP mask to use for internal allocations | |
1763 | * @callback:function called when request completes, times out or is | |
1764 | * canceled | |
1765 | * @context:opaque user context passed to callback | |
1766 | * @sa_query:request context, used to cancel request | |
1767 | * | |
1768 | * Send a Service Record set/get/delete to the SA to register, | |
1769 | * unregister or query a service record. | |
1770 | * The callback function will be called when the request completes (or | |
1771 | * fails); status is 0 for a successful response, -EINTR if the query | |
1772 | * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error |
1773 | * occurred sending the query. The resp parameter of the callback is | |
1774 | * only valid if status is 0. | |
1775 | * | |
1776 | * If the return value of ib_sa_service_rec_query() is negative, it is an | |
1777 | * error code. Otherwise it is a request ID that can be used to cancel | |
1778 | * the query. | |
1779 | */ | |
c1a0b23b MT |
1780 | int ib_sa_service_rec_query(struct ib_sa_client *client, |
1781 | struct ib_device *device, u8 port_num, u8 method, | |
cbae32c5 HR |
1782 | struct ib_sa_service_rec *rec, |
1783 | ib_sa_comp_mask comp_mask, | |
dd0fc66f | 1784 | int timeout_ms, gfp_t gfp_mask, |
cbae32c5 HR |
1785 | void (*callback)(int status, |
1786 | struct ib_sa_service_rec *resp, | |
1787 | void *context), | |
1788 | void *context, | |
1789 | struct ib_sa_query **sa_query) | |
1790 | { | |
1791 | struct ib_sa_service_query *query; | |
1792 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | |
56c202d6 RD |
1793 | struct ib_sa_port *port; |
1794 | struct ib_mad_agent *agent; | |
34816ad9 | 1795 | struct ib_sa_mad *mad; |
cbae32c5 HR |
1796 | int ret; |
1797 | ||
56c202d6 RD |
1798 | if (!sa_dev) |
1799 | return -ENODEV; | |
1800 | ||
1801 | port = &sa_dev->port[port_num - sa_dev->start_port]; | |
1802 | agent = port->agent; | |
1803 | ||
cbae32c5 HR |
1804 | if (method != IB_MGMT_METHOD_GET && |
1805 | method != IB_MGMT_METHOD_SET && | |
1806 | method != IB_SA_METHOD_DELETE) | |
1807 | return -EINVAL; | |
1808 | ||
5d265770 | 1809 | query = kzalloc(sizeof(*query), gfp_mask); |
cbae32c5 HR |
1810 | if (!query) |
1811 | return -ENOMEM; | |
34816ad9 | 1812 | |
2aec5c60 SH |
1813 | query->sa_query.port = port; |
1814 | ret = alloc_mad(&query->sa_query, gfp_mask); | |
1815 | if (ret) | |
34816ad9 | 1816 | goto err1; |
cbae32c5 | 1817 | |
c1a0b23b MT |
1818 | ib_sa_client_get(client); |
1819 | query->sa_query.client = client; | |
1820 | query->callback = callback; | |
1821 | query->context = context; | |
cbae32c5 | 1822 | |
34816ad9 | 1823 | mad = query->sa_query.mad_buf->mad; |
2196f271 | 1824 | init_mad(&query->sa_query, agent); |
cbae32c5 | 1825 | |
34816ad9 SH |
1826 | query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; |
1827 | query->sa_query.release = ib_sa_service_rec_release; | |
34816ad9 SH |
1828 | mad->mad_hdr.method = method; |
1829 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC); | |
1830 | mad->sa_hdr.comp_mask = comp_mask; | |
cbae32c5 HR |
1831 | |
1832 | ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), | |
34816ad9 | 1833 | rec, mad->data); |
cbae32c5 HR |
1834 | |
1835 | *sa_query = &query->sa_query; | |
1836 | ||
e322fedf | 1837 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); |
34816ad9 SH |
1838 | if (ret < 0) |
1839 | goto err2; | |
1840 | ||
1841 | return ret; | |
cbae32c5 | 1842 | |
34816ad9 SH |
1843 | err2: |
1844 | *sa_query = NULL; | |
c1a0b23b | 1845 | ib_sa_client_put(query->sa_query.client); |
2aec5c60 | 1846 | free_mad(&query->sa_query); |
34816ad9 SH |
1847 | |
1848 | err1: | |
1849 | kfree(query); | |
cbae32c5 HR |
1850 | return ret; |
1851 | } | |
1852 | EXPORT_SYMBOL(ib_sa_service_rec_query); | |
1853 | ||
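A comparable, assumption-heavy sketch for ib_sa_service_rec_query(), registering a ServiceRecord with IB_MGMT_METHOD_SET; the ib_sa_service_rec field names (id, name) and the IB_SA_SERVICE_REC_* mask bits are assumed from <rdma/ib_sa.h> and should be checked against the real header, and the client is assumed to already be registered with ib_sa_register_client().

#include <linux/completion.h>
#include <rdma/ib_sa.h>

/* Hedged sketch only: IB_MGMT_METHOD_SET registers the record,
 * IB_MGMT_METHOD_GET queries it and IB_SA_METHOD_DELETE removes it,
 * matching the method check at the top of ib_sa_service_rec_query(). */
static void demo_service_done(int status, struct ib_sa_service_rec *resp,
			      void *context)
{
	complete(context);	/* resp is valid only when status == 0 */
}

static int demo_register_service(struct ib_sa_client *client,
				 struct ib_device *dev, u8 port_num)
{
	struct ib_sa_service_rec rec = {};
	struct ib_sa_query *query;
	struct completion done;
	int id;

	init_completion(&done);
	rec.id = 0x1100d05000000001ULL;		/* assumed service ID encoding */
	memcpy(rec.name, "demo-service", sizeof("demo-service"));

	id = ib_sa_service_rec_query(client, dev, port_num, IB_MGMT_METHOD_SET,
				     &rec,
				     IB_SA_SERVICE_REC_SERVICE_ID |
				     IB_SA_SERVICE_REC_SERVICE_NAME,
				     2000, GFP_KERNEL,
				     demo_service_done, &done, &query);
	if (id < 0)
		return id;

	wait_for_completion(&done);
	return 0;
}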
1da177e4 LT |
1854 | static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query, |
1855 | int status, | |
1856 | struct ib_sa_mad *mad) | |
1857 | { | |
1858 | struct ib_sa_mcmember_query *query = | |
1859 | container_of(sa_query, struct ib_sa_mcmember_query, sa_query); | |
1860 | ||
1861 | if (mad) { | |
1862 | struct ib_sa_mcmember_rec rec; | |
1863 | ||
1864 | ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), | |
1865 | mad->data, &rec); | |
1866 | query->callback(status, &rec, query->context); | |
1867 | } else | |
1868 | query->callback(status, NULL, query->context); | |
1869 | } | |
1870 | ||
1871 | static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) | |
1872 | { | |
1da177e4 LT |
1873 | kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); |
1874 | } | |
1875 | ||
c1a0b23b MT |
1876 | int ib_sa_mcmember_rec_query(struct ib_sa_client *client, |
1877 | struct ib_device *device, u8 port_num, | |
1da177e4 LT |
1878 | u8 method, |
1879 | struct ib_sa_mcmember_rec *rec, | |
1880 | ib_sa_comp_mask comp_mask, | |
dd0fc66f | 1881 | int timeout_ms, gfp_t gfp_mask, |
1da177e4 LT |
1882 | void (*callback)(int status, |
1883 | struct ib_sa_mcmember_rec *resp, | |
1884 | void *context), | |
1885 | void *context, | |
1886 | struct ib_sa_query **sa_query) | |
1887 | { | |
1888 | struct ib_sa_mcmember_query *query; | |
1889 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | |
56c202d6 RD |
1890 | struct ib_sa_port *port; |
1891 | struct ib_mad_agent *agent; | |
34816ad9 | 1892 | struct ib_sa_mad *mad; |
1da177e4 LT |
1893 | int ret; |
1894 | ||
56c202d6 RD |
1895 | if (!sa_dev) |
1896 | return -ENODEV; | |
1897 | ||
1898 | port = &sa_dev->port[port_num - sa_dev->start_port]; | |
1899 | agent = port->agent; | |
1900 | ||
5d265770 | 1901 | query = kzalloc(sizeof(*query), gfp_mask); |
1da177e4 LT |
1902 | if (!query) |
1903 | return -ENOMEM; | |
34816ad9 | 1904 | |
2aec5c60 SH |
1905 | query->sa_query.port = port; |
1906 | ret = alloc_mad(&query->sa_query, gfp_mask); | |
1907 | if (ret) | |
34816ad9 | 1908 | goto err1; |
1da177e4 | 1909 | |
c1a0b23b MT |
1910 | ib_sa_client_get(client); |
1911 | query->sa_query.client = client; | |
1912 | query->callback = callback; | |
1913 | query->context = context; | |
1da177e4 | 1914 | |
34816ad9 | 1915 | mad = query->sa_query.mad_buf->mad; |
2196f271 | 1916 | init_mad(&query->sa_query, agent); |
1da177e4 | 1917 | |
34816ad9 SH |
1918 | query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; |
1919 | query->sa_query.release = ib_sa_mcmember_rec_release; | |
34816ad9 SH |
1920 | mad->mad_hdr.method = method; |
1921 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); | |
1922 | mad->sa_hdr.comp_mask = comp_mask; | |
1da177e4 LT |
1923 | |
1924 | ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), | |
34816ad9 | 1925 | rec, mad->data); |
1da177e4 LT |
1926 | |
1927 | *sa_query = &query->sa_query; | |
dae4c1d2 | 1928 | |
e322fedf | 1929 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); |
34816ad9 SH |
1930 | if (ret < 0) |
1931 | goto err2; | |
1da177e4 | 1932 | |
dae4c1d2 | 1933 | return ret; |
34816ad9 SH |
1934 | |
1935 | err2: | |
1936 | *sa_query = NULL; | |
c1a0b23b | 1937 | ib_sa_client_put(query->sa_query.client); |
2aec5c60 | 1938 | free_mad(&query->sa_query); |
34816ad9 SH |
1939 | |
1940 | err1: | |
1941 | kfree(query); | |
1942 | return ret; | |
1da177e4 | 1943 | } |
1da177e4 | 1944 | |
aeab97ed ES |
1945 | /* Support GuidInfoRecord */ |
1946 | static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, | |
1947 | int status, | |
1948 | struct ib_sa_mad *mad) | |
1949 | { | |
1950 | struct ib_sa_guidinfo_query *query = | |
1951 | container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); | |
1952 | ||
1953 | if (mad) { | |
1954 | struct ib_sa_guidinfo_rec rec; | |
1955 | ||
1956 | ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), | |
1957 | mad->data, &rec); | |
1958 | query->callback(status, &rec, query->context); | |
1959 | } else | |
1960 | query->callback(status, NULL, query->context); | |
1961 | } | |
1962 | ||
1963 | static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) | |
1964 | { | |
1965 | kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); | |
1966 | } | |
1967 | ||
1968 | int ib_sa_guid_info_rec_query(struct ib_sa_client *client, | |
1969 | struct ib_device *device, u8 port_num, | |
1970 | struct ib_sa_guidinfo_rec *rec, | |
1971 | ib_sa_comp_mask comp_mask, u8 method, | |
1972 | int timeout_ms, gfp_t gfp_mask, | |
1973 | void (*callback)(int status, | |
1974 | struct ib_sa_guidinfo_rec *resp, | |
1975 | void *context), | |
1976 | void *context, | |
1977 | struct ib_sa_query **sa_query) | |
1978 | { | |
1979 | struct ib_sa_guidinfo_query *query; | |
1980 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | |
1981 | struct ib_sa_port *port; | |
1982 | struct ib_mad_agent *agent; | |
1983 | struct ib_sa_mad *mad; | |
1984 | int ret; | |
1985 | ||
1986 | if (!sa_dev) | |
1987 | return -ENODEV; | |
1988 | ||
1989 | if (method != IB_MGMT_METHOD_GET && | |
1990 | method != IB_MGMT_METHOD_SET && | |
1991 | method != IB_SA_METHOD_DELETE) { | |
1992 | return -EINVAL; | |
1993 | } | |
1994 | ||
1995 | port = &sa_dev->port[port_num - sa_dev->start_port]; | |
1996 | agent = port->agent; | |
1997 | ||
5d265770 | 1998 | query = kzalloc(sizeof(*query), gfp_mask); |
aeab97ed ES |
1999 | if (!query) |
2000 | return -ENOMEM; | |
2001 | ||
2002 | query->sa_query.port = port; | |
2003 | ret = alloc_mad(&query->sa_query, gfp_mask); | |
2004 | if (ret) | |
2005 | goto err1; | |
2006 | ||
2007 | ib_sa_client_get(client); | |
2008 | query->sa_query.client = client; | |
2009 | query->callback = callback; | |
2010 | query->context = context; | |
2011 | ||
2012 | mad = query->sa_query.mad_buf->mad; | |
2196f271 | 2013 | init_mad(&query->sa_query, agent); |
aeab97ed ES |
2014 | |
2015 | query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL; | |
2016 | query->sa_query.release = ib_sa_guidinfo_rec_release; | |
2017 | ||
2018 | mad->mad_hdr.method = method; | |
2019 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); | |
2020 | mad->sa_hdr.comp_mask = comp_mask; | |
2021 | ||
2022 | ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, | |
2023 | mad->data); | |
2024 | ||
2025 | *sa_query = &query->sa_query; | |
2026 | ||
2027 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); | |
2028 | if (ret < 0) | |
2029 | goto err2; | |
2030 | ||
2031 | return ret; | |
2032 | ||
2033 | err2: | |
2034 | *sa_query = NULL; | |
2035 | ib_sa_client_put(query->sa_query.client); | |
2036 | free_mad(&query->sa_query); | |
2037 | ||
2038 | err1: | |
2039 | kfree(query); | |
2040 | return ret; | |
2041 | } | |
2042 | EXPORT_SYMBOL(ib_sa_guid_info_rec_query); | |
2043 | ||
ee1c60b1 DC |
2044 | bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client, |
2045 | struct ib_device *device, | |
2046 | u8 port_num) | |
2047 | { | |
2048 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | |
2049 | struct ib_sa_port *port; | |
2050 | bool ret = false; | |
2051 | unsigned long flags; | |
2052 | ||
2053 | if (!sa_dev) | |
2054 | return ret; | |
2055 | ||
2056 | port = &sa_dev->port[port_num - sa_dev->start_port]; | |
2057 | ||
2058 | spin_lock_irqsave(&port->classport_lock, flags); | |
2196f271 DC |
2059 | if ((port->classport_info.valid) && |
2060 | (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB)) | |
2061 | ret = ib_get_cpi_capmask2(&port->classport_info.data.ib) | |
2062 | & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT; | |
ee1c60b1 DC |
2063 | spin_unlock_irqrestore(&port->classport_lock, flags); |
2064 | return ret; | |
2065 | } | |
2066 | EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support); | |
2067 | ||
2068 | struct ib_classport_info_context { | |
2069 | struct completion done; | |
2070 | struct ib_sa_query *sa_query; | |
2071 | }; | |
2072 | ||
2073 | static void ib_classportinfo_cb(void *context) | |
2074 | { | |
2075 | struct ib_classport_info_context *cb_ctx = context; | |
2076 | ||
2077 | complete(&cb_ctx->done); | |
2078 | } | |
2079 | ||
628e6f75 ES |
2080 | static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query, |
2081 | int status, | |
2082 | struct ib_sa_mad *mad) | |
2083 | { | |
3d3fd742 | 2084 | unsigned long flags; |
628e6f75 ES |
2085 | struct ib_sa_classport_info_query *query = |
2086 | container_of(sa_query, struct ib_sa_classport_info_query, sa_query); | |
2196f271 | 2087 | struct ib_sa_classport_cache *info = &sa_query->port->classport_info; |
628e6f75 ES |
2088 | |
2089 | if (mad) { | |
2196f271 DC |
2090 | if (sa_query->flags & IB_SA_QUERY_OPA) { |
2091 | struct opa_class_port_info rec; | |
628e6f75 | 2092 | |
2196f271 DC |
2093 | ib_unpack(opa_classport_info_rec_table, |
2094 | ARRAY_SIZE(opa_classport_info_rec_table), | |
2095 | mad->data, &rec); | |
2096 | ||
2097 | spin_lock_irqsave(&sa_query->port->classport_lock, | |
2098 | flags); | |
2099 | if (!status && !info->valid) { | |
2100 | memcpy(&info->data.opa, &rec, | |
2101 | sizeof(info->data.opa)); | |
2102 | ||
2103 | info->valid = true; | |
2104 | info->data.type = RDMA_CLASS_PORT_INFO_OPA; | |
2105 | } | |
2106 | spin_unlock_irqrestore(&sa_query->port->classport_lock, | |
2107 | flags); | |
2108 | ||
2109 | } else { | |
2110 | struct ib_class_port_info rec; | |
3d3fd742 | 2111 | |
2196f271 DC |
2112 | ib_unpack(ib_classport_info_rec_table, |
2113 | ARRAY_SIZE(ib_classport_info_rec_table), | |
2114 | mad->data, &rec); | |
3d3fd742 | 2115 | |
2196f271 DC |
2116 | spin_lock_irqsave(&sa_query->port->classport_lock, |
2117 | flags); | |
2118 | if (!status && !info->valid) { | |
2119 | memcpy(&info->data.ib, &rec, | |
2120 | sizeof(info->data.ib)); | |
2121 | ||
2122 | info->valid = true; | |
2123 | info->data.type = RDMA_CLASS_PORT_INFO_IB; | |
2124 | } | |
2125 | spin_unlock_irqrestore(&sa_query->port->classport_lock, | |
2126 | flags); | |
3d3fd742 | 2127 | } |
628e6f75 | 2128 | } |
ee1c60b1 | 2129 | query->callback(query->context); |
628e6f75 ES |
2130 | } |
2131 | ||
ee1c60b1 | 2132 | static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query) |
628e6f75 ES |
2133 | { |
2134 | kfree(container_of(sa_query, struct ib_sa_classport_info_query, | |
2135 | sa_query)); | |
2136 | } | |
2137 | ||
ee1c60b1 DC |
2138 | static int ib_sa_classport_info_rec_query(struct ib_sa_port *port, |
2139 | int timeout_ms, | |
2140 | void (*callback)(void *context), | |
2141 | void *context, | |
2142 | struct ib_sa_query **sa_query) | |
628e6f75 | 2143 | { |
628e6f75 | 2144 | struct ib_mad_agent *agent; |
ee1c60b1 | 2145 | struct ib_sa_classport_info_query *query; |
628e6f75 | 2146 | struct ib_sa_mad *mad; |
ee1c60b1 | 2147 | gfp_t gfp_mask = GFP_KERNEL; |
628e6f75 ES |
2148 | int ret; |
2149 | ||
628e6f75 ES |
2150 | agent = port->agent; |
2151 | ||
2152 | query = kzalloc(sizeof(*query), gfp_mask); | |
2153 | if (!query) | |
2154 | return -ENOMEM; | |
2155 | ||
2156 | query->sa_query.port = port; | |
2196f271 DC |
2157 | query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device, |
2158 | port->port_num) ? | |
2159 | IB_SA_QUERY_OPA : 0; | |
628e6f75 ES |
2160 | ret = alloc_mad(&query->sa_query, gfp_mask); |
2161 | if (ret) | |
ee1c60b1 | 2162 | goto err_free; |
628e6f75 | 2163 | |
ee1c60b1 DC |
2164 | query->callback = callback; |
2165 | query->context = context; | |
628e6f75 ES |
2166 | |
2167 | mad = query->sa_query.mad_buf->mad; | |
2196f271 | 2168 | init_mad(&query->sa_query, agent); |
628e6f75 | 2169 | |
ee1c60b1 DC |
2170 | query->sa_query.callback = ib_sa_classport_info_rec_callback; |
2171 | query->sa_query.release = ib_sa_classport_info_rec_release; | |
628e6f75 ES |
2172 | mad->mad_hdr.method = IB_MGMT_METHOD_GET; |
2173 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO); | |
2174 | mad->sa_hdr.comp_mask = 0; | |
2175 | *sa_query = &query->sa_query; | |
2176 | ||
2177 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); | |
2178 | if (ret < 0) | |
ee1c60b1 | 2179 | goto err_free_mad; |
628e6f75 ES |
2180 | |
2181 | return ret; | |
2182 | ||
ee1c60b1 | 2183 | err_free_mad: |
628e6f75 | 2184 | *sa_query = NULL; |
628e6f75 ES |
2185 | free_mad(&query->sa_query); |
2186 | ||
ee1c60b1 | 2187 | err_free: |
628e6f75 ES |
2188 | kfree(query); |
2189 | return ret; | |
2190 | } | |
ee1c60b1 DC |
2191 | |
2192 | static void update_ib_cpi(struct work_struct *work) | |
2193 | { | |
2194 | struct ib_sa_port *port = | |
2195 | container_of(work, struct ib_sa_port, ib_cpi_work.work); | |
2196 | struct ib_classport_info_context *cb_context; | |
2197 | unsigned long flags; | |
2198 | int ret; | |
2199 | ||
2200 | /* If the classport info is valid, nothing | |
2201 | * to do here. | |
2202 | */ | |
2203 | spin_lock_irqsave(&port->classport_lock, flags); | |
2204 | if (port->classport_info.valid) { | |
2205 | spin_unlock_irqrestore(&port->classport_lock, flags); | |
2206 | return; | |
2207 | } | |
2208 | spin_unlock_irqrestore(&port->classport_lock, flags); | |
2209 | ||
2210 | cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL); | |
2211 | if (!cb_context) | |
2212 | goto err_nomem; | |
2213 | ||
2214 | init_completion(&cb_context->done); | |
2215 | ||
2216 | ret = ib_sa_classport_info_rec_query(port, 3000, | |
2217 | ib_classportinfo_cb, cb_context, | |
2218 | &cb_context->sa_query); | |
2219 | if (ret < 0) | |
2220 | goto free_cb_err; | |
2221 | wait_for_completion(&cb_context->done); | |
2222 | free_cb_err: | |
2223 | kfree(cb_context); | |
2224 | spin_lock_irqsave(&port->classport_lock, flags); | |
2225 | ||
2226 | /* If the classport info is still not valid, the query should have | |
2227 | * failed for some reason. Retry issuing the query. |
2228 | */ | |
2229 | if (!port->classport_info.valid) { | |
2230 | port->classport_info.retry_cnt++; | |
2231 | if (port->classport_info.retry_cnt <= | |
2232 | IB_SA_CPI_MAX_RETRY_CNT) { | |
2233 | unsigned long delay = | |
2234 | msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT); | |
2235 | ||
2236 | queue_delayed_work(ib_wq, &port->ib_cpi_work, delay); | |
2237 | } | |
2238 | } | |
2239 | spin_unlock_irqrestore(&port->classport_lock, flags); | |
2240 | ||
2241 | err_nomem: | |
2242 | return; | |
2243 | } | |
628e6f75 | 2244 | |
1da177e4 LT |
2245 | static void send_handler(struct ib_mad_agent *agent, |
2246 | struct ib_mad_send_wc *mad_send_wc) | |
2247 | { | |
34816ad9 | 2248 | struct ib_sa_query *query = mad_send_wc->send_buf->context[0]; |
1da177e4 LT |
2249 | unsigned long flags; |
2250 | ||
e4f50f00 RD |
2251 | if (query->callback) |
2252 | switch (mad_send_wc->status) { | |
2253 | case IB_WC_SUCCESS: | |
2254 | /* No callback -- already got recv */ | |
2255 | break; | |
2256 | case IB_WC_RESP_TIMEOUT_ERR: | |
2257 | query->callback(query, -ETIMEDOUT, NULL); | |
2258 | break; | |
2259 | case IB_WC_WR_FLUSH_ERR: | |
2260 | query->callback(query, -EINTR, NULL); | |
2261 | break; | |
2262 | default: | |
2263 | query->callback(query, -EIO, NULL); | |
2264 | break; | |
2265 | } | |
1da177e4 | 2266 | |
1da177e4 | 2267 | spin_lock_irqsave(&idr_lock, flags); |
34816ad9 | 2268 | idr_remove(&query_idr, query->id); |
1da177e4 | 2269 | spin_unlock_irqrestore(&idr_lock, flags); |
34816ad9 | 2270 | |
2aec5c60 | 2271 | free_mad(query); |
ee1c60b1 DC |
2272 | if (query->client) |
2273 | ib_sa_client_put(query->client); | |
34816ad9 | 2274 | query->release(query); |
1da177e4 LT |
2275 | } |
2276 | ||
2277 | static void recv_handler(struct ib_mad_agent *mad_agent, | |
ca281265 | 2278 | struct ib_mad_send_buf *send_buf, |
1da177e4 LT |
2279 | struct ib_mad_recv_wc *mad_recv_wc) |
2280 | { | |
2281 | struct ib_sa_query *query; | |
1da177e4 | 2282 | |
ca281265 CH |
2283 | if (!send_buf) |
2284 | return; | |
1da177e4 | 2285 | |
ca281265 | 2286 | query = send_buf->context[0]; |
34816ad9 | 2287 | if (query->callback) { |
1da177e4 LT |
2288 | if (mad_recv_wc->wc->status == IB_WC_SUCCESS) |
2289 | query->callback(query, | |
2290 | mad_recv_wc->recv_buf.mad->mad_hdr.status ? | |
2291 | -EINVAL : 0, | |
2292 | (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad); | |
2293 | else | |
2294 | query->callback(query, -EIO, NULL); | |
2295 | } | |
2296 | ||
2297 | ib_free_recv_mad(mad_recv_wc); | |
2298 | } | |
2299 | ||
cb863766 DC |
2300 | static void update_sm_ah(struct work_struct *work) |
2301 | { | |
2302 | struct ib_sa_port *port = | |
2303 | container_of(work, struct ib_sa_port, update_task); | |
2304 | struct ib_sa_sm_ah *new_ah; | |
2305 | struct ib_port_attr port_attr; | |
90898850 | 2306 | struct rdma_ah_attr ah_attr; |
cb863766 DC |
2307 | |
2308 | if (ib_query_port(port->agent->device, port->port_num, &port_attr)) { | |
2309 | pr_warn("Couldn't query port\n"); | |
2310 | return; | |
2311 | } | |
2312 | ||
2313 | new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL); | |
2314 | if (!new_ah) | |
2315 | return; | |
2316 | ||
2317 | kref_init(&new_ah->ref); | |
2318 | new_ah->src_path_mask = (1 << port_attr.lmc) - 1; | |
2319 | ||
2320 | new_ah->pkey_index = 0; | |
2321 | if (ib_find_pkey(port->agent->device, port->port_num, | |
2322 | IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index)) | |
2323 | pr_err("Couldn't find index for default PKey\n"); | |
2324 | ||
2325 | memset(&ah_attr, 0, sizeof(ah_attr)); | |
44c58487 DC |
2326 | ah_attr.type = rdma_ah_find_type(port->agent->device, |
2327 | port->port_num); | |
d8966fcd DC |
2328 | rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid); |
2329 | rdma_ah_set_sl(&ah_attr, port_attr.sm_sl); | |
2330 | rdma_ah_set_port_num(&ah_attr, port->port_num); | |
cb863766 | 2331 | if (port_attr.grh_required) { |
d98bb7f7 DH |
2332 | if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA) { |
2333 | rdma_ah_set_make_grd(&ah_attr, true); | |
2334 | } else { | |
2335 | rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH); | |
2336 | rdma_ah_set_subnet_prefix(&ah_attr, | |
2337 | cpu_to_be64(port_attr.subnet_prefix)); | |
2338 | rdma_ah_set_interface_id(&ah_attr, | |
2339 | cpu_to_be64(IB_SA_WELL_KNOWN_GUID)); | |
2340 | } | |
cb863766 DC |
2341 | } |
2342 | ||
0a18cfe4 | 2343 | new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr); |
cb863766 DC |
2344 | if (IS_ERR(new_ah->ah)) { |
2345 | pr_warn("Couldn't create new SM AH\n"); | |
2346 | kfree(new_ah); | |
2347 | return; | |
2348 | } | |
2349 | ||
2350 | spin_lock_irq(&port->ah_lock); | |
2351 | if (port->sm_ah) | |
2352 | kref_put(&port->sm_ah->ref, free_sm_ah); | |
2353 | port->sm_ah = new_ah; | |
2354 | spin_unlock_irq(&port->ah_lock); | |
2355 | } | |
2356 | ||
2357 | static void ib_sa_event(struct ib_event_handler *handler, | |
2358 | struct ib_event *event) | |
2359 | { | |
2360 | if (event->event == IB_EVENT_PORT_ERR || | |
2361 | event->event == IB_EVENT_PORT_ACTIVE || | |
2362 | event->event == IB_EVENT_LID_CHANGE || | |
2363 | event->event == IB_EVENT_PKEY_CHANGE || | |
2364 | event->event == IB_EVENT_SM_CHANGE || | |
2365 | event->event == IB_EVENT_CLIENT_REREGISTER) { | |
2366 | unsigned long flags; | |
2367 | struct ib_sa_device *sa_dev = | |
2368 | container_of(handler, typeof(*sa_dev), event_handler); | |
2369 | u8 port_num = event->element.port_num - sa_dev->start_port; | |
2370 | struct ib_sa_port *port = &sa_dev->port[port_num]; | |
2371 | ||
2372 | if (!rdma_cap_ib_sa(handler->device, port->port_num)) | |
2373 | return; | |
2374 | ||
2375 | spin_lock_irqsave(&port->ah_lock, flags); | |
2376 | if (port->sm_ah) | |
2377 | kref_put(&port->sm_ah->ref, free_sm_ah); | |
2378 | port->sm_ah = NULL; | |
2379 | spin_unlock_irqrestore(&port->ah_lock, flags); | |
2380 | ||
2381 | if (event->event == IB_EVENT_SM_CHANGE || | |
2382 | event->event == IB_EVENT_CLIENT_REREGISTER || | |
ee1c60b1 DC |
2383 | event->event == IB_EVENT_LID_CHANGE || |
2384 | event->event == IB_EVENT_PORT_ACTIVE) { | |
2385 | unsigned long delay = | |
2386 | msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT); | |
2387 | ||
cb863766 DC |
2388 | spin_lock_irqsave(&port->classport_lock, flags); |
2389 | port->classport_info.valid = false; | |
ee1c60b1 | 2390 | port->classport_info.retry_cnt = 0; |
cb863766 | 2391 | spin_unlock_irqrestore(&port->classport_lock, flags); |
ee1c60b1 DC |
2392 | queue_delayed_work(ib_wq, |
2393 | &port->ib_cpi_work, delay); | |
cb863766 DC |
2394 | } |
2395 | queue_work(ib_wq, &sa_dev->port[port_num].update_task); | |
2396 | } | |
2397 | } | |
2398 | ||
1da177e4 LT |
2399 | static void ib_sa_add_one(struct ib_device *device) |
2400 | { | |
2401 | struct ib_sa_device *sa_dev; | |
2402 | int s, e, i; | |
08e3681a | 2403 | int count = 0; |
07ebafba | 2404 | |
4139032b HR |
2405 | s = rdma_start_port(device); |
2406 | e = rdma_end_port(device); | |
1da177e4 | 2407 | |
fac70d51 | 2408 | sa_dev = kzalloc(sizeof *sa_dev + |
1da177e4 LT |
2409 | (e - s + 1) * sizeof (struct ib_sa_port), |
2410 | GFP_KERNEL); | |
2411 | if (!sa_dev) | |
2412 | return; | |
2413 | ||
2414 | sa_dev->start_port = s; | |
2415 | sa_dev->end_port = e; | |
2416 | ||
2417 | for (i = 0; i <= e - s; ++i) { | |
fac70d51 | 2418 | spin_lock_init(&sa_dev->port[i].ah_lock); |
fe53ba2f | 2419 | if (!rdma_cap_ib_sa(device, i + 1)) |
fac70d51 EC |
2420 | continue; |
2421 | ||
1da177e4 LT |
2422 | sa_dev->port[i].sm_ah = NULL; |
2423 | sa_dev->port[i].port_num = i + s; | |
1da177e4 | 2424 | |
3d3fd742 AV |
2425 | spin_lock_init(&sa_dev->port[i].classport_lock); |
2426 | sa_dev->port[i].classport_info.valid = false; | |
2427 | ||
1da177e4 LT |
2428 | sa_dev->port[i].agent = |
2429 | ib_register_mad_agent(device, i + s, IB_QPT_GSI, | |
2430 | NULL, 0, send_handler, | |
0f29b46d | 2431 | recv_handler, sa_dev, 0); |
1da177e4 LT |
2432 | if (IS_ERR(sa_dev->port[i].agent)) |
2433 | goto err; | |
2434 | ||
c4028958 | 2435 | INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); |
ee1c60b1 DC |
2436 | INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work, |
2437 | update_ib_cpi); | |
08e3681a MW |
2438 | |
2439 | count++; | |
1da177e4 LT |
2440 | } |
2441 | ||
08e3681a MW |
2442 | if (!count) |
2443 | goto free; | |
2444 | ||
1da177e4 LT |
2445 | ib_set_client_data(device, &sa_client, sa_dev); |
2446 | ||
2447 | /* | |
2448 | * We register our event handler after everything is set up, | |
2449 | * and then update our cached info after the event handler is | |
2450 | * registered to avoid any problems if a port changes state | |
2451 | * during our initialization. | |
2452 | */ | |
2453 | ||
2454 | INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event); | |
dcc9881e | 2455 | ib_register_event_handler(&sa_dev->event_handler); |
1da177e4 | 2456 | |
08e3681a | 2457 | for (i = 0; i <= e - s; ++i) { |
fe53ba2f | 2458 | if (rdma_cap_ib_sa(device, i + 1)) |
fac70d51 | 2459 | update_sm_ah(&sa_dev->port[i].update_task); |
08e3681a | 2460 | } |
1da177e4 LT |
2461 | |
2462 | return; | |
2463 | ||
2464 | err: | |
08e3681a | 2465 | while (--i >= 0) { |
fe53ba2f | 2466 | if (rdma_cap_ib_sa(device, i + 1)) |
fac70d51 | 2467 | ib_unregister_mad_agent(sa_dev->port[i].agent); |
08e3681a MW |
2468 | } |
2469 | free: | |
1da177e4 | 2470 | kfree(sa_dev); |
1da177e4 LT |
2471 | return; |
2472 | } | |
2473 | ||
7c1eb45a | 2474 | static void ib_sa_remove_one(struct ib_device *device, void *client_data) |
1da177e4 | 2475 | { |
7c1eb45a | 2476 | struct ib_sa_device *sa_dev = client_data; |
1da177e4 LT |
2477 | int i; |
2478 | ||
2479 | if (!sa_dev) | |
2480 | return; | |
2481 | ||
2482 | ib_unregister_event_handler(&sa_dev->event_handler); | |
96e61fa5 | 2483 | flush_workqueue(ib_wq); |
0f47ae0b | 2484 | |
1da177e4 | 2485 | for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { |
fe53ba2f | 2486 | if (rdma_cap_ib_sa(device, i + 1)) { |
ee1c60b1 | 2487 | cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work); |
fac70d51 EC |
2488 | ib_unregister_mad_agent(sa_dev->port[i].agent); |
2489 | if (sa_dev->port[i].sm_ah) | |
2490 | kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); | |
2491 | } | |
2492 | ||
1da177e4 LT |
2493 | } |
2494 | ||
2495 | kfree(sa_dev); | |
2496 | } | |
2497 | ||
c2e49c92 | 2498 | int ib_sa_init(void) |
1da177e4 LT |
2499 | { |
2500 | int ret; | |
2501 | ||
1da177e4 LT |
2502 | get_random_bytes(&tid, sizeof tid); |
2503 | ||
2ca546b9 KW |
2504 | atomic_set(&ib_nl_sa_request_seq, 0); |
2505 | ||
1da177e4 | 2506 | ret = ib_register_client(&sa_client); |
faec2f7b | 2507 | if (ret) { |
aba25a3e | 2508 | pr_err("Couldn't register ib_sa client\n"); |
faec2f7b SH |
2509 | goto err1; |
2510 | } | |
2511 | ||
2512 | ret = mcast_init(); | |
2513 | if (ret) { | |
aba25a3e | 2514 | pr_err("Couldn't initialize multicast handling\n"); |
faec2f7b SH |
2515 | goto err2; |
2516 | } | |
1da177e4 | 2517 | |
4534d859 | 2518 | ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM); |
2ca546b9 KW |
2519 | if (!ib_nl_wq) { |
2520 | ret = -ENOMEM; | |
2521 | goto err3; | |
2522 | } | |
2523 | ||
2ca546b9 KW |
2524 | INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout); |
2525 | ||
faec2f7b | 2526 | return 0; |
735c631a | 2527 | |
2ca546b9 KW |
2528 | err3: |
2529 | mcast_cleanup(); | |
faec2f7b SH |
2530 | err2: |
2531 | ib_unregister_client(&sa_client); | |
2532 | err1: | |
1da177e4 LT |
2533 | return ret; |
2534 | } | |
2535 | ||
c2e49c92 | 2536 | void ib_sa_cleanup(void) |
1da177e4 | 2537 | { |
2ca546b9 KW |
2538 | cancel_delayed_work(&ib_nl_timed_work); |
2539 | flush_workqueue(ib_nl_wq); | |
2540 | destroy_workqueue(ib_nl_wq); | |
faec2f7b | 2541 | mcast_cleanup(); |
1da177e4 | 2542 | ib_unregister_client(&sa_client); |
5d7edb3c | 2543 | idr_destroy(&query_idr); |
1da177e4 | 2544 | } |