/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

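/*
 * Allocate an SRP information unit (IU): a kernel buffer of @size
 * bytes plus the DMA mapping used to post it to the HCA.
 */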
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dev->dma_device,
				 iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dev->dma_device,
			 iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

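/*
 * Move a freshly created (or reset) QP to the INIT state, resolving
 * the P_Key index for the target's P_Key on the local port.
 */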
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

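/*
 * Create the per-target IB resources: one CQ shared by send and
 * receive work, and an RC QP attached to it.
 */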
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->cq;
	init_attr->recv_cq          = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

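/*
 * Query the SA for a path record to the target and wait for the
 * answer; the result lands in target->path via the completion
 * callback above.
 */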
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	|
						   IB_SA_PATH_REC_SGID	|
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

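/*
 * Build and send the CM REQ carrying the SRP_LOGIN_REQ private data
 * that asks the target to open an RC connection to our QP.
 */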
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path       = &target->path;
	req->param.alternate_path     = NULL;
	req->param.service_id         = target->service_id;
	req->param.qp_num             = target->qp->qp_num;
	req->param.qp_type            = target->qp->qp_type;
	req->param.private_data       = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control       = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn      &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = 7;
	req->param.rnr_retry_count            = 7;
	req->param.max_cm_retries             = 15;

	req->priv.opcode        = SRP_LOGIN_REQ;
	req->priv.tag           = 0;
	req->priv.req_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id,     &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	mutex_lock(&target->srp_host->target_mutex);
	list_del(&target->list);
	mutex_unlock(&target->srp_host->target_mutex);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat = scmnd->request_buffer;
	} else {
		nents = 1;
		scat = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}

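/*
 * Tear down and re-establish the connection after an error: mark the
 * target CONNECTING, disconnect, swap in a fresh CM ID, reset the QP,
 * fail all outstanding requests with DID_RESET, and reconnect.  If
 * that fails, schedule deferred removal of the target port.
 */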
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
		srp_unmap_data(req->scmnd, target, req);
	}

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;
	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

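/*
 * Try to register the scatterlist with the FMR pool so the target
 * sees one virtually contiguous buffer.  Only the first entry may
 * start, and only the last entry may end, off an FMR page boundary;
 * otherwise a single mapping can't describe the region and we
 * return -EINVAL so the caller falls back to an indirect descriptor.
 */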
static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;

	if (!dev->fmr_pool)
		return -ENODEV;

	len = page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i) {
		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += sg_dma_len(&scat[i]);
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i)
		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, &io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		/* don't leave a stale ERR_PTR for srp_unmap_data() to unmap */
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

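/*
 * Set up the data descriptor(s) in the SRP_CMD IU for @scmnd: a
 * direct descriptor if everything maps to one DMA entry or the FMR
 * mapping succeeds, an indirect descriptor table otherwise.
 * Returns the total IU length in bytes, or a negative error.
 */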
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat = scmnd->request_buffer;
	} else {
		nents = 1;
		scat = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
			   scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));
	} else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va =
				cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->dev->mr->rkey);
			buf->desc_list[i].len =
				cpu_to_be32(sg_dma_len(&scat[i]));
			datalen += sg_dma_len(&scat[i]);
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

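/*
 * Process an SRP_RSP IU: credit the request-limit delta, then either
 * complete a task-management request or finish the SCSI command the
 * response tag refers to (copying sense data and residual counts).
 */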
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd) {
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
			goto out;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

out:
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}

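/*
 * Handle a receive completion: sync the IU for CPU access, dispatch
 * on the SRP opcode, then hand the buffer back to the device.
 */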
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

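/*
 * CQ completion handler: re-arm the CQ, then drain it.  A completion
 * with error status schedules target->work (reconnect) if the target
 * is still live.
 */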
static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next    = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1)) {
		if (printk_ratelimit())
			printk(KERN_DEBUG PFX "Target has req_lim %d\n",
			       target->req_lim);
		return NULL;
	}

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

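/*
 * SCSI midlayer queuecommand entry point: grab a TX IU and a free
 * request slot, build the SRP_CMD IU, map the data buffer, post a
 * receive for the response, and post the send.
 */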
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   SRP_MAX_IU_LEN, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  SRP_MAX_IU_LEN,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

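/*
 * CM event handler: on a REP, parse the SRP_LOGIN_RSP, allocate the
 * IU rings, and walk the QP through RTR and RTS before sending the
 * RTU; on errors and REJs, record a status and wake up the waiter in
 * srp_connect_target().
 */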
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_MRA_RECEIVED:
		printk(KERN_ERR PFX "MRA received\n");
		break;

	case IB_CM_DREP_RECEIVED:
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

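/*
 * Send an SRP task-management IU (abort or LUN reset) for @req and
 * wait up to SRP_ABORT_TIMEOUT_MS for the target's response.
 */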
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode        = SRP_TSK_MGMT;
	tsk_mgmt->lun           = cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag           = req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag      = req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}

static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device) {
			req->scmnd->result = DID_RESET << 16;
			req->scmnd->scsi_done(req->scmnd);
			srp_remove_req(target, req);
		}

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.sg_tablesize			= SRP_MAX_INDIRECT,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	mutex_lock(&host->target_mutex);
	list_add_tail(&target->list, &host->target_list);
	mutex_unlock(&host->target_mutex);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,	"id_ext=%s"	},
	{ SRP_OPT_IOC_GUID,	"ioc_guid=%s"	},
	{ SRP_OPT_DGID,		"dgid=%s"	},
	{ SRP_OPT_PKEY,		"pkey=%x"	},
	{ SRP_OPT_SERVICE_ID,	"service_id=%s" },
	{ SRP_OPT_MAX_SECT,	"max_sect=%d"	},
	{ SRP_OPT_ERR,		NULL		}
};

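/*
 * Parse the comma-separated option string written to add_target into
 * @target, and verify that all mandatory parameters were given.
 */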
1511 | static int srp_parse_options(const char *buf, struct srp_target_port *target) | |
1512 | { | |
1513 | char *options, *sep_opt; | |
1514 | char *p; | |
1515 | char dgid[3]; | |
1516 | substring_t args[MAX_OPT_ARGS]; | |
1517 | int opt_mask = 0; | |
1518 | int token; | |
1519 | int ret = -EINVAL; | |
1520 | int i; | |
1521 | ||
1522 | options = kstrdup(buf, GFP_KERNEL); | |
1523 | if (!options) | |
1524 | return -ENOMEM; | |
1525 | ||
1526 | sep_opt = options; | |
1527 | while ((p = strsep(&sep_opt, ",")) != NULL) { | |
1528 | if (!*p) | |
1529 | continue; | |
1530 | ||
1531 | token = match_token(p, srp_opt_tokens, args); | |
1532 | opt_mask |= token; | |
1533 | ||
1534 | switch (token) { | |
1535 | case SRP_OPT_ID_EXT: | |
1536 | p = match_strdup(args); | |
1537 | target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16)); | |
1538 | kfree(p); | |
1539 | break; | |
1540 | ||
1541 | case SRP_OPT_IOC_GUID: | |
1542 | p = match_strdup(args); | |
1543 | target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16)); | |
1544 | kfree(p); | |
1545 | break; | |
1546 | ||
1547 | case SRP_OPT_DGID: | |
1548 | p = match_strdup(args); | |
1549 | if (strlen(p) != 32) { | |
1550 | printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p); | |
ce1823f0 | 1551 | kfree(p); |
aef9ec39 RD |
1552 | goto out; |
1553 | } | |
1554 | ||
1555 | for (i = 0; i < 16; ++i) { | |
1556 | strlcpy(dgid, p + i * 2, 3); | |
1557 | target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16); | |
1558 | } | |
bf17c1c7 | 1559 | kfree(p); |
aef9ec39 RD |
1560 | break; |
1561 | ||
1562 | case SRP_OPT_PKEY: | |
1563 | if (match_hex(args, &token)) { | |
1564 | printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p); | |
1565 | goto out; | |
1566 | } | |
1567 | target->path.pkey = cpu_to_be16(token); | |
1568 | break; | |
1569 | ||
1570 | case SRP_OPT_SERVICE_ID: | |
1571 | p = match_strdup(args); | |
1572 | target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16)); | |
1573 | kfree(p); | |
1574 | break; | |
1575 | ||
1576 | case SRP_OPT_MAX_SECT: | |
1577 | if (match_int(args, &token)) { | |
1578 | printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p); | |
1579 | goto out; | |
1580 | } | |
1581 | target->scsi_host->max_sectors = token; | |
1582 | break; | |
1583 | ||
1584 | default: | |
1585 | printk(KERN_WARNING PFX "unknown parameter or missing value " | |
1586 | "'%s' in target creation request\n", p); | |
1587 | goto out; | |
1588 | } | |
1589 | } | |
1590 | ||
1591 | if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL) | |
1592 | ret = 0; | |
1593 | else | |
1594 | for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i) | |
1595 | if ((srp_opt_tokens[i].token & SRP_OPT_ALL) && | |
1596 | !(srp_opt_tokens[i].token & opt_mask)) | |
1597 | printk(KERN_WARNING PFX "target creation request is " | |
1598 | "missing parameter '%s'\n", | |
1599 | srp_opt_tokens[i].pattern); | |
1600 | ||
1601 | out: | |
1602 | kfree(options); | |
1603 | return ret; | |
1604 | } | |
1605 | ||
1606 | static ssize_t srp_create_target(struct class_device *class_dev, | |
1607 | const char *buf, size_t count) | |
1608 | { | |
1609 | struct srp_host *host = | |
1610 | container_of(class_dev, struct srp_host, class_dev); | |
1611 | struct Scsi_Host *target_host; | |
1612 | struct srp_target_port *target; | |
1613 | int ret; | |
1614 | int i; | |
1615 | ||
1616 | target_host = scsi_host_alloc(&srp_template, | |
1617 | sizeof (struct srp_target_port)); | |
1618 | if (!target_host) | |
1619 | return -ENOMEM; | |
1620 | ||
5f068992 RD |
1621 | target_host->max_lun = SRP_MAX_LUN; |
1622 | ||
aef9ec39 RD |
1623 | target = host_to_target(target_host); |
1624 | memset(target, 0, sizeof *target); | |
1625 | ||
1626 | target->scsi_host = target_host; | |
1627 | target->srp_host = host; | |
1628 | ||
1629 | INIT_WORK(&target->work, srp_reconnect_work, target); | |
1630 | ||
d945e1df | 1631 | INIT_LIST_HEAD(&target->free_reqs); |
aef9ec39 | 1632 | INIT_LIST_HEAD(&target->req_queue); |
d945e1df RD |
1633 | for (i = 0; i < SRP_SQ_SIZE; ++i) { |
1634 | target->req_ring[i].index = i; | |
1635 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | |
1636 | } | |
aef9ec39 RD |
1637 | |
1638 | ret = srp_parse_options(buf, target); | |
1639 | if (ret) | |
1640 | goto err; | |
1641 | ||
f5358a17 | 1642 | ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid); |
aef9ec39 RD |
1643 | |
	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

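	/*
	 * Error unwind: undo the setup steps in reverse order, falling
	 * through from the most recent failure point.
	 */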
err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	mutex_init(&host->target_mutex);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

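	/*
	 * Build the 16-byte initiator port ID: byte 7 holds the port
	 * number and bytes 8-15 hold the node GUID, so each
	 * (device, port) pair gets a distinct ID.
	 */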
	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);
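	/*
	 * For example (illustrative numbers): an HCA whose smallest
	 * supported page size is 4 KB has bit 12 set in page_size_cap,
	 * so fmr_page_shift = 12, fmr_page_size = 4096 and
	 * fmr_page_mask = ~0xfffUL.
	 */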

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

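	/*
	 * Set up a pool of fast memory registrations for mapping
	 * scatterlists on the fly. The pool is an optimization: if it
	 * cannot be created, the driver falls back to using the global
	 * DMA memory region for each request.
	 */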
	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

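	/*
	 * Switch management ports are numbered 0; CAs and routers
	 * number their physical ports starting at 1.
	 */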
	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;
	unsigned long flags;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		mutex_lock(&host->target_mutex);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state != SRP_TARGET_REMOVED)
				target->state = SRP_TARGET_REMOVED;
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		}
		mutex_unlock(&host->target_mutex);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

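		/*
		 * It is now safe to tear each target down: detach it
		 * from the SCSI midlayer first so no new commands
		 * arrive, then release the connection and IB resources.
		 */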
		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

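/*
 * The class must be registered before the IB client: srp_add_one() can
 * run from within ib_register_client() and immediately creates class
 * devices under srp_class.
 */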
static int __init srp_init_module(void)
{
	int ret;

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);