/*
 * Copyright (C) 2004 SUSE LINUX Products GmbH. All rights reserved.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 *
 * Multipath support for EMC CLARiiON AX/CX-series hardware.
 */

#include "dm.h"
#include "dm-hw-handler.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#define DM_MSG_PREFIX "multipath emc"

struct emc_handler {
        spinlock_t lock;

        /* Whether we should send the short trespass command (FC-series)
         * or the long version (default for AX/CX CLARiiON arrays). */
        unsigned short_trespass;
        /* Whether or not to honor SCSI reservations when initiating a
         * switch-over. Default: Don't. */
        unsigned hr;

        unsigned char sense[SCSI_SENSE_BUFFERSIZE];
};

#define TRESPASS_PAGE 0x22
#define EMC_FAILOVER_TIMEOUT (60 * HZ)

/* Code borrowed from dm-lsi-rdac by Mike Christie */

static inline void free_bio(struct bio *bio)
{
        __free_page(bio->bi_io_vec[0].bv_page);
        bio_put(bio);
}

static void emc_endio(struct bio *bio, int error)
{
        struct dm_path *path = bio->bi_private;

        /* We also need to look at the sense keys here to decide whether
         * or not to switch to the next PG etc.
         *
         * For now simple logic: either it works or it doesn't.
         */
        if (error)
                dm_pg_init_complete(path, MP_FAIL_PATH);
        else
                dm_pg_init_complete(path, 0);

        /* request is freed in block layer */
        free_bio(bio);
}
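
/*
 * Allocate a single-page WRITE bio aimed at the given path's device to
 * carry the trespass mode page, with emc_endio() as its completion
 * handler.
 */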
static struct bio *get_failover_bio(struct dm_path *path, unsigned data_size)
{
        struct bio *bio;
        struct page *page;

        bio = bio_alloc(GFP_ATOMIC, 1);
        if (!bio) {
                DMERR("get_failover_bio: bio_alloc() failed.");
                return NULL;
        }

        bio->bi_rw |= (1 << BIO_RW);
        bio->bi_bdev = path->dev->bdev;
        bio->bi_sector = 0;
        bio->bi_private = path;
        bio->bi_end_io = emc_endio;

        page = alloc_page(GFP_ATOMIC);
        if (!page) {
                DMERR("get_failover_bio: alloc_page() failed.");
                bio_put(bio);
                return NULL;
        }

        if (bio_add_page(bio, page, data_size, 0) != data_size) {
                DMERR("get_failover_bio: bio_add_page() failed.");
                __free_page(page);
                bio_put(bio);
                return NULL;
        }

        return bio;
}
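
/*
 * Wrap the failover bio in a block-layer packet command (BLOCK_PC)
 * request on the path's queue, pointing the request's sense buffer at
 * the handler's sense area and applying the failover timeout.
 */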
static struct request *get_failover_req(struct emc_handler *h,
                                        struct bio *bio, struct dm_path *path)
{
        struct request *rq;
        struct block_device *bdev = bio->bi_bdev;
        struct request_queue *q = bdev_get_queue(bdev);

        /* FIXME: Figure out why it fails with GFP_ATOMIC. */
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
        if (!rq) {
                DMERR("get_failover_req: blk_get_request failed");
                return NULL;
        }

        blk_rq_append_bio(q, rq, bio);

        rq->sense = h->sense;
        memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
        rq->sense_len = 0;

        rq->timeout = EMC_FAILOVER_TIMEOUT;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;

        return rq;
}
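
/*
 * Build the MODE SELECT request carrying the trespass page (0x22),
 * choosing the short or long form of the page and the honor-reservation
 * setting from the handler configuration.
 */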
static struct request *emc_trespass_get(struct emc_handler *h,
                                        struct dm_path *path)
{
        struct bio *bio;
        struct request *rq;
        unsigned char *page22;
        unsigned char long_trespass_pg[] = {
                0, 0, 0, 0,
                TRESPASS_PAGE,       /* Page code */
                0x09,                /* Page length - 2 */
                h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
                0xff, 0xff,          /* Trespass target */
                0, 0, 0, 0, 0, 0     /* Reserved bytes / unknown */
        };
        unsigned char short_trespass_pg[] = {
                0, 0, 0, 0,
                TRESPASS_PAGE,       /* Page code */
                0x02,                /* Page length - 2 */
                h->hr ? 0x01 : 0x81, /* Trespass code + Honor reservation bit */
                0xff,                /* Trespass target */
        };
        unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
                                sizeof(long_trespass_pg);

        /* get bio backing */
        if (data_size > PAGE_SIZE)
                /* this should never happen */
                return NULL;

        bio = get_failover_bio(path, data_size);
        if (!bio) {
                DMERR("emc_trespass_get: no bio");
                return NULL;
        }

        page22 = (unsigned char *)bio_data(bio);
        memset(page22, 0, data_size);

        memcpy(page22, h->short_trespass ?
                short_trespass_pg : long_trespass_pg, data_size);

        /* get request for block layer packet command */
        rq = get_failover_req(h, bio, path);
        if (!rq) {
                DMERR("emc_trespass_get: no rq");
                free_bio(bio);
                return NULL;
        }

        /* Prepare the command. */
        rq->cmd[0] = MODE_SELECT;
        rq->cmd[1] = 0x10;
        rq->cmd[4] = data_size;
        rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

        return rq;
}
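
/*
 * Path-group init: build the trespass request for the chosen path and
 * push it to the front of that path's request queue; the result is
 * reported asynchronously through emc_endio().
 */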
static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
                        struct dm_path *path)
{
        struct request *rq;
        struct request_queue *q = bdev_get_queue(path->dev->bdev);

        /*
         * We can either blindly init the pg (then look at the sense),
         * or we can send some commands to get the state here (then
         * possibly send the failover command), or we can also have the
         * initial state passed into us and then get an update here.
         */
        if (!q) {
                DMINFO("emc_pg_init: no queue");
                goto fail_path;
        }

        /* FIXME: The request should be pre-allocated. */
        rq = emc_trespass_get(hwh->context, path);
        if (!rq) {
                DMERR("emc_pg_init: no rq");
                goto fail_path;
        }

        DMINFO("emc_pg_init: sending switch-over command");
        elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
        return;

fail_path:
        dm_pg_init_complete(path, MP_FAIL_PATH);
}

static struct emc_handler *alloc_emc_handler(void)
{
        struct emc_handler *h = kzalloc(sizeof(*h), GFP_KERNEL);

        if (h)
                spin_lock_init(&h->lock);

        return h;
}
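
/*
 * Constructor: parse the optional hardware-handler arguments from the
 * multipath table. Either no arguments (defaults: long trespass, do not
 * honor reservations) or exactly two 0/1 flags: short_trespass followed
 * by hr (honor reservation).
 */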
static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv)
{
        struct emc_handler *h;
        unsigned hr, short_trespass;

        if (argc == 0) {
                /* No arguments: use defaults */
                hr = 0;
                short_trespass = 0;
        } else if (argc != 2) {
                DMWARN("incorrect number of arguments");
                return -EINVAL;
        } else {
                if ((sscanf(argv[0], "%u", &short_trespass) != 1)
                    || (short_trespass > 1)) {
                        DMWARN("invalid trespass mode selected");
                        return -EINVAL;
                }

                if ((sscanf(argv[1], "%u", &hr) != 1)
                    || (hr > 1)) {
                        DMWARN("invalid honor reservation flag selected");
                        return -EINVAL;
                }
        }

        h = alloc_emc_handler();
        if (!h)
                return -ENOMEM;

        hwh->context = h;

        if ((h->short_trespass = short_trespass))
                DMWARN("short trespass command will be sent");
        else
                DMWARN("long trespass command will be sent");

        if ((h->hr = hr))
                DMWARN("honor reservation bit will be set");
        else
                DMWARN("honor reservation bit will not be set (default)");

        return 0;
}

static void emc_destroy(struct hw_handler *hwh)
{
        struct emc_handler *h = (struct emc_handler *) hwh->context;

        kfree(h);
        hwh->context = NULL;
}
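
/*
 * Error mapping hook. The sense-based special cases below are still
 * compiled out (see the FIXME); for now every error is passed on to the
 * generic dm_scsi_err_handler().
 */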
static unsigned emc_error(struct hw_handler *hwh, struct bio *bio)
{
        /* FIXME: Patch from axboe still missing */
#if 0
        int sense;

        if (bio->bi_error & BIO_SENSE) {
                sense = bio->bi_error & 0xffffff; /* sense key / asc / ascq */

                if (sense == 0x020403) {
                        /* LUN Not Ready - Manual Intervention Required
                         * indicates this is a passive path.
                         *
                         * FIXME: However, if this is seen and EVPD C0
                         * indicates that this is due to an NDU in
                         * progress, we should set FAIL_PATH too.
                         * This indicates we might have to do a SCSI
                         * inquiry in the end_io path. Ugh. */
                        return MP_BYPASS_PG | MP_RETRY_IO;
                } else if (sense == 0x052501) {
                        /* An array-based copy is in progress. Do not
                         * fail the path, do not bypass to another PG,
                         * do not retry. Fail the IO immediately.
                         * (Actually this is the same conclusion as in
                         * the default handler, but let's make sure.) */
                        return 0;
                } else if (sense == 0x062900) {
                        /* Unit Attention Code. This is the first IO
                         * to the new path, so just retry. */
                        return MP_RETRY_IO;
                }
        }
#endif

        /* Try default handler */
        return dm_scsi_err_handler(hwh, bio);
}
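
/*
 * The hw_handler_type that dm-multipath looks up by the name "emc";
 * registered and unregistered in the module init/exit hooks below.
 */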
static struct hw_handler_type emc_hwh = {
        .name = "emc",
        .module = THIS_MODULE,
        .create = emc_create,
        .destroy = emc_destroy,
        .pg_init = emc_pg_init,
        .error = emc_error,
};

static int __init dm_emc_init(void)
{
        int r = dm_register_hw_handler(&emc_hwh);

        if (r < 0)
                DMERR("register failed %d", r);

        DMINFO("version 0.0.3 loaded");

        return r;
}

static void __exit dm_emc_exit(void)
{
        int r = dm_unregister_hw_handler(&emc_hwh);

        if (r < 0)
                DMERR("unregister failed %d", r);
}

module_init(dm_emc_init);
module_exit(dm_emc_exit);

MODULE_DESCRIPTION(DM_NAME " EMC CX/AX/FC-family multipath");
MODULE_AUTHOR("Lars Marowsky-Bree <[email protected]>");
MODULE_LICENSE("GPL");