/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_sa.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_sa_sm_ah {
        struct ib_ah        *ah;
        struct kref          ref;
};

struct ib_sa_port {
        struct ib_mad_agent *agent;
        struct ib_sa_sm_ah  *sm_ah;
        struct work_struct   update_task;
        spinlock_t           ah_lock;
        u8                   port_num;
};

struct ib_sa_device {
        int                     start_port, end_port;
        struct ib_event_handler event_handler;
        struct ib_sa_port       port[0];
};

struct ib_sa_query {
        void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
        void (*release)(struct ib_sa_query *);
        struct ib_sa_port      *port;
        struct ib_mad_send_buf *mad_buf;
        struct ib_sa_sm_ah     *sm_ah;
        int                     id;
};

struct ib_sa_service_query {
        void (*callback)(int, struct ib_sa_service_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
        void (*callback)(int, struct ib_sa_path_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
        void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
        void *context;
        struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
        .name   = "sa",
        .add    = ib_sa_add_one,
        .remove = ib_sa_remove_one
};

static spinlock_t idr_lock;
static DEFINE_IDR(query_idr);

static spinlock_t tid_lock;
static u32 tid;

#define PATH_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),  \
        .struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field, \
        .field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
        { RESERVED,
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { RESERVED,
          .offset_words = 1,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { PATH_REC_FIELD(dgid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(sgid),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { PATH_REC_FIELD(dlid),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { PATH_REC_FIELD(slid),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { PATH_REC_FIELD(raw_traffic),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 11,
          .offset_bits  = 1,
          .size_bits    = 3 },
        { PATH_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { PATH_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { PATH_REC_FIELD(traffic_class),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 8 },
        { PATH_REC_FIELD(reversible),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { PATH_REC_FIELD(numb_path),
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 7 },
        { PATH_REC_FIELD(pkey),
          .offset_words = 12,
          .offset_bits  = 16,
          .size_bits    = 16 },
        { RESERVED,
          .offset_words = 13,
          .offset_bits  = 0,
          .size_bits    = 12 },
        { PATH_REC_FIELD(sl),
          .offset_words = 13,
          .offset_bits  = 12,
          .size_bits    = 4 },
        { PATH_REC_FIELD(mtu_selector),
          .offset_words = 13,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { PATH_REC_FIELD(mtu),
          .offset_words = 13,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { PATH_REC_FIELD(rate_selector),
          .offset_words = 13,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { PATH_REC_FIELD(rate),
          .offset_words = 13,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { PATH_REC_FIELD(packet_life_time_selector),
          .offset_words = 14,
          .offset_bits  = 0,
          .size_bits    = 2 },
        { PATH_REC_FIELD(packet_life_time),
          .offset_words = 14,
          .offset_bits  = 2,
          .size_bits    = 6 },
        { PATH_REC_FIELD(preference),
          .offset_words = 14,
          .offset_bits  = 8,
          .size_bits    = 8 },
        { RESERVED,
          .offset_words = 14,
          .offset_bits  = 16,
          .size_bits    = 48 },
};

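/*
 * Example (illustrative sketch, not part of the original source): the field
 * tables in this file drive ib_pack()/ib_unpack(), which translate between
 * the host-endian record structures and the big-endian on-the-wire SA
 * attribute layout described by offset_words/offset_bits/size_bits.  The
 * variable names below are hypothetical:
 *
 *      struct ib_sa_path_rec rec, decoded;
 *      u8 wire[IB_MGMT_SA_DATA];
 *
 *      ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), &rec, wire);
 *      ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), wire, &decoded);
 */
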
#define MCMEMBER_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),  \
        .struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
        .field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
        { MCMEMBER_REC_FIELD(mgid),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(port_gid),
          .offset_words = 4,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { MCMEMBER_REC_FIELD(qkey),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { MCMEMBER_REC_FIELD(mlid),
          .offset_words = 9,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(mtu_selector),
          .offset_words = 9,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(mtu),
          .offset_words = 9,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(traffic_class),
          .offset_words = 9,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(pkey),
          .offset_words = 10,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { MCMEMBER_REC_FIELD(rate_selector),
          .offset_words = 10,
          .offset_bits  = 16,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(rate),
          .offset_words = 10,
          .offset_bits  = 18,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(packet_life_time_selector),
          .offset_words = 10,
          .offset_bits  = 24,
          .size_bits    = 2 },
        { MCMEMBER_REC_FIELD(packet_life_time),
          .offset_words = 10,
          .offset_bits  = 26,
          .size_bits    = 6 },
        { MCMEMBER_REC_FIELD(sl),
          .offset_words = 11,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(flow_label),
          .offset_words = 11,
          .offset_bits  = 4,
          .size_bits    = 20 },
        { MCMEMBER_REC_FIELD(hop_limit),
          .offset_words = 11,
          .offset_bits  = 24,
          .size_bits    = 8 },
        { MCMEMBER_REC_FIELD(scope),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(join_state),
          .offset_words = 12,
          .offset_bits  = 4,
          .size_bits    = 4 },
        { MCMEMBER_REC_FIELD(proxy_join),
          .offset_words = 12,
          .offset_bits  = 8,
          .size_bits    = 1 },
        { RESERVED,
          .offset_words = 12,
          .offset_bits  = 9,
          .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
        .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),  \
        .struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field, \
        .field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
        { SERVICE_REC_FIELD(id),
          .offset_words = 0,
          .offset_bits  = 0,
          .size_bits    = 64 },
        { SERVICE_REC_FIELD(gid),
          .offset_words = 2,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(pkey),
          .offset_words = 6,
          .offset_bits  = 0,
          .size_bits    = 16 },
        { SERVICE_REC_FIELD(lease),
          .offset_words = 7,
          .offset_bits  = 0,
          .size_bits    = 32 },
        { SERVICE_REC_FIELD(key),
          .offset_words = 8,
          .offset_bits  = 0,
          .size_bits    = 128 },
        { SERVICE_REC_FIELD(name),
          .offset_words = 12,
          .offset_bits  = 0,
          .size_bits    = 64*8 },
        { SERVICE_REC_FIELD(data8),
          .offset_words = 28,
          .offset_bits  = 0,
          .size_bits    = 16*8 },
        { SERVICE_REC_FIELD(data16),
          .offset_words = 32,
          .offset_bits  = 0,
          .size_bits    = 8*16 },
        { SERVICE_REC_FIELD(data32),
          .offset_words = 36,
          .offset_bits  = 0,
          .size_bits    = 4*32 },
        { SERVICE_REC_FIELD(data64),
          .offset_words = 40,
          .offset_bits  = 0,
          .size_bits    = 2*64 },
};

static void free_sm_ah(struct kref *kref)
{
        struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

        ib_destroy_ah(sm_ah->ah);
        kfree(sm_ah);
}

static void update_sm_ah(void *port_ptr)
{
        struct ib_sa_port *port = port_ptr;
        struct ib_sa_sm_ah *new_ah, *old_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;

        if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
                printk(KERN_WARNING "Couldn't query port\n");
                return;
        }

        new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
        if (!new_ah) {
                printk(KERN_WARNING "Couldn't allocate new SM AH\n");
                return;
        }

        kref_init(&new_ah->ref);

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = port_attr.sm_lid;
        ah_attr.sl       = port_attr.sm_sl;
        ah_attr.port_num = port->port_num;

        new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
        if (IS_ERR(new_ah->ah)) {
                printk(KERN_WARNING "Couldn't create new SM AH\n");
                kfree(new_ah);
                return;
        }

        spin_lock_irq(&port->ah_lock);
        old_ah = port->sm_ah;
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);

        if (old_ah)
                kref_put(&old_ah->ref, free_sm_ah);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE) {
                struct ib_sa_device *sa_dev;
                sa_dev = container_of(handler, typeof(*sa_dev), event_handler);

                schedule_work(&sa_dev->port[event->element.port_num -
                                            sa_dev->start_port].update_task);
        }
}

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
        unsigned long flags;
        struct ib_mad_agent *agent;
        struct ib_mad_send_buf *mad_buf;

        spin_lock_irqsave(&idr_lock, flags);
        if (idr_find(&query_idr, id) != query) {
                spin_unlock_irqrestore(&idr_lock, flags);
                return;
        }
        agent = query->port->agent;
        mad_buf = query->mad_buf;
        spin_unlock_irqrestore(&idr_lock, flags);

        ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

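/*
 * Example (illustrative, not part of the original source): a caller that
 * started a query keeps the returned ID and the query pointer filled in
 * through the *sa_query output argument, and can later try to cancel it.
 * "id" and "query" below are hypothetical variables obtained from one of
 * the ib_sa_*_query() calls in this file:
 *
 *      if (id >= 0)
 *              ib_sa_cancel_query(id, query);
 */
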
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
        unsigned long flags;

        memset(mad, 0, sizeof *mad);

        mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
        mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
        mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

        spin_lock_irqsave(&tid_lock, flags);
        mad->mad_hdr.tid =
                cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
        spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms)
{
        unsigned long flags;
        int ret, id;

retry:
        if (!idr_pre_get(&query_idr, GFP_ATOMIC))
                return -ENOMEM;
        spin_lock_irqsave(&idr_lock, flags);
        ret = idr_get_new(&query_idr, query, &id);
        spin_unlock_irqrestore(&idr_lock, flags);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                return ret;

        query->mad_buf->timeout_ms = timeout_ms;
        query->mad_buf->context[0] = query;
        query->id = id;

        spin_lock_irqsave(&query->port->ah_lock, flags);
        kref_get(&query->port->sm_ah->ref);
        query->sm_ah = query->port->sm_ah;
        spin_unlock_irqrestore(&query->port->ah_lock, flags);

        query->mad_buf->ah = query->sm_ah->ah;

        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&query_idr, id);
                spin_unlock_irqrestore(&idr_lock, flags);

                kref_put(&query->sm_ah->ref, free_sm_ah);
        }

        /*
         * It's not safe to dereference query any more, because the
         * send may already have completed and freed the query in
         * another context.
         */
        return ret ? ret : id;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                                    int status,
                                    struct ib_sa_mad *mad)
{
        struct ib_sa_path_query *query =
                container_of(sa_query, struct ib_sa_path_query, sa_query);

        if (mad) {
                struct ib_sa_path_rec rec;

                ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
                       struct ib_sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       int timeout_ms, gfp_t gfp_mask,
                       void (*callback)(int status,
                                        struct ib_sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **sa_query)
{
        struct ib_sa_path_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
                                                     0, IB_MGMT_SA_HDR,
                                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!query->sa_query.mad_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        query->callback = callback;
        query->context  = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
        query->sa_query.release  = ib_sa_path_rec_release;
        query->sa_query.port     = port;
        mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_free_send_mad(query->sa_query.mad_buf);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);

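/*
 * Example (illustrative sketch only, not part of the original source): one
 * plausible way a consumer might issue a path record query with
 * ib_sa_path_rec_get().  The callback, result buffer and component mask
 * choices are hypothetical; only the function signature comes from the
 * code above.
 *
 *      static void my_path_handler(int status, struct ib_sa_path_rec *resp,
 *                                  void *context)
 *      {
 *              if (!status)
 *                      memcpy(context, resp, sizeof *resp);
 *      }
 *
 *      struct ib_sa_query *query;
 *      struct ib_sa_path_rec rec, result;
 *      int id;
 *
 *      (fill in rec.dgid and rec.sgid, then:)
 *      id = ib_sa_path_rec_get(device, port_num, &rec,
 *                              IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *                              1000, GFP_KERNEL,
 *                              my_path_handler, &result, &query);
 *      if (id < 0)
 *              return id;
 */
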
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
                                       int status,
                                       struct ib_sa_mad *mad)
{
        struct ib_sa_service_query *query =
                container_of(sa_query, struct ib_sa_service_query, sa_query);

        if (mad) {
                struct ib_sa_service_rec rec;

                ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
                            struct ib_sa_service_rec *rec,
                            ib_sa_comp_mask comp_mask,
                            int timeout_ms, gfp_t gfp_mask,
                            void (*callback)(int status,
                                             struct ib_sa_service_rec *resp,
                                             void *context),
                            void *context,
                            struct ib_sa_query **sa_query)
{
        struct ib_sa_service_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        if (method != IB_MGMT_METHOD_GET &&
            method != IB_MGMT_METHOD_SET &&
            method != IB_SA_METHOD_DELETE)
                return -EINVAL;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
                                                     0, IB_MGMT_SA_HDR,
                                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!query->sa_query.mad_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        query->callback = callback;
        query->context  = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
        query->sa_query.release  = ib_sa_service_rec_release;
        query->sa_query.port     = port;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_free_send_mad(query->sa_query.mad_buf);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

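/*
 * Example (illustrative sketch only, not part of the original source):
 * registering a service record is a SET request and removing it is a
 * DELETE; both go through ib_sa_service_rec_query().  The record, mask,
 * handler and context below are hypothetical:
 *
 *      id = ib_sa_service_rec_query(device, port_num, IB_MGMT_METHOD_SET,
 *                                   &svc_rec, comp_mask, 1000, GFP_KERNEL,
 *                                   my_service_handler, my_context, &query);
 */
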
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
                                        int status,
                                        struct ib_sa_mad *mad)
{
        struct ib_sa_mcmember_query *query =
                container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

        if (mad) {
                struct ib_sa_mcmember_rec rec;

                ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                          mad->data, &rec);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
        kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             int timeout_ms, gfp_t gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **sa_query)
{
        struct ib_sa_mcmember_query *query;
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        struct ib_sa_port   *port;
        struct ib_mad_agent *agent;
        struct ib_sa_mad *mad;
        int ret;

        if (!sa_dev)
                return -ENODEV;

        port  = &sa_dev->port[port_num - sa_dev->start_port];
        agent = port->agent;

        query = kmalloc(sizeof *query, gfp_mask);
        if (!query)
                return -ENOMEM;

        query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0,
                                                     0, IB_MGMT_SA_HDR,
                                                     IB_MGMT_SA_DATA, gfp_mask);
        if (!query->sa_query.mad_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        query->callback = callback;
        query->context  = context;

        mad = query->sa_query.mad_buf->mad;
        init_mad(mad, agent);

        query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
        query->sa_query.release  = ib_sa_mcmember_rec_release;
        query->sa_query.port     = port;
        mad->mad_hdr.method      = method;
        mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
        mad->sa_hdr.comp_mask    = comp_mask;

        ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
                rec, mad->data);

        *sa_query = &query->sa_query;

        ret = send_mad(&query->sa_query, timeout_ms);
        if (ret < 0)
                goto err2;

        return ret;

err2:
        *sa_query = NULL;
        ib_free_send_mad(query->sa_query.mad_buf);

err1:
        kfree(query);
        return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);

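/*
 * Example (illustrative sketch only, not part of the original source):
 * joining a multicast group is a SET of an MCMemberRecord with at least
 * the MGID, port GID and join state filled in and named in the component
 * mask.  The callback and context names below are hypothetical:
 *
 *      rec.join_state = 1;
 *      id = ib_sa_mcmember_rec_query(device, port_num, IB_MGMT_METHOD_SET,
 *                                    &rec,
 *                                    IB_SA_MCMEMBER_REC_MGID |
 *                                    IB_SA_MCMEMBER_REC_PORT_GID |
 *                                    IB_SA_MCMEMBER_REC_JOIN_STATE,
 *                                    1000, GFP_KERNEL,
 *                                    my_mcmember_handler, my_context, &query);
 */
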
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
        unsigned long flags;

        if (query->callback)
                switch (mad_send_wc->status) {
                case IB_WC_SUCCESS:
                        /* No callback -- already got recv */
                        break;
                case IB_WC_RESP_TIMEOUT_ERR:
                        query->callback(query, -ETIMEDOUT, NULL);
                        break;
                case IB_WC_WR_FLUSH_ERR:
                        query->callback(query, -EINTR, NULL);
                        break;
                default:
                        query->callback(query, -EIO, NULL);
                        break;
                }

        spin_lock_irqsave(&idr_lock, flags);
        idr_remove(&query_idr, query->id);
        spin_unlock_irqrestore(&idr_lock, flags);

        ib_free_send_mad(mad_send_wc->send_buf);
        kref_put(&query->sm_ah->ref, free_sm_ah);
        query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_sa_query *query;
        struct ib_mad_send_buf *mad_buf;

        mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
        query = mad_buf->context[0];

        if (query->callback) {
                if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
                        query->callback(query,
                                        mad_recv_wc->recv_buf.mad->mad_hdr.status ?
                                        -EINVAL : 0,
                                        (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
                else
                        query->callback(query, -EIO, NULL);
        }

        ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev;
        int s, e, i;

        if (device->node_type == IB_NODE_SWITCH)
                s = e = 0;
        else {
                s = 1;
                e = device->phys_port_cnt;
        }

        sa_dev = kmalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
                         GFP_KERNEL);
        if (!sa_dev)
                return;

        sa_dev->start_port = s;
        sa_dev->end_port   = e;

        for (i = 0; i <= e - s; ++i) {
                sa_dev->port[i].sm_ah    = NULL;
                sa_dev->port[i].port_num = i + s;
                spin_lock_init(&sa_dev->port[i].ah_lock);

                sa_dev->port[i].agent =
                        ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                                              NULL, 0, send_handler,
                                              recv_handler, sa_dev);
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;

                INIT_WORK(&sa_dev->port[i].update_task,
                          update_sm_ah, &sa_dev->port[i]);
        }

        ib_set_client_data(device, &sa_client, sa_dev);

        /*
         * We register our event handler after everything is set up,
         * and then update our cached info after the event handler is
         * registered to avoid any problems if a port changes state
         * during our initialization.
         */

        INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
        if (ib_register_event_handler(&sa_dev->event_handler))
                goto err;

        for (i = 0; i <= e - s; ++i)
                update_sm_ah(&sa_dev->port[i]);

        return;

err:
        while (--i >= 0)
                ib_unregister_mad_agent(sa_dev->port[i].agent);

        kfree(sa_dev);

        return;
}

static void ib_sa_remove_one(struct ib_device *device)
{
        struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
        int i;

        if (!sa_dev)
                return;

        ib_unregister_event_handler(&sa_dev->event_handler);

        for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
                ib_unregister_mad_agent(sa_dev->port[i].agent);
                kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
        }

        kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
        int ret;

        spin_lock_init(&idr_lock);
        spin_lock_init(&tid_lock);

        get_random_bytes(&tid, sizeof tid);

        ret = ib_register_client(&sa_client);
        if (ret)
                printk(KERN_ERR "Couldn't register ib_sa client\n");

        return ret;
}

static void __exit ib_sa_cleanup(void)
{
        ib_unregister_client(&sa_client);
        idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);