/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct iblock_hba *ib_host;

	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
	if (!ib_host) {
		pr_err("Unable to allocate memory for"
				" struct iblock_hba\n");
		return -ENOMEM;
	}

	ib_host->iblock_host_id = host_id;

	hba->hba_ptr = ib_host;

	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
		hba->hba_id, ib_host->iblock_host_id);

	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
	struct iblock_hba *ib_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

	kfree(ib_host);
	hba->hba_ptr = NULL;
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;
	struct iblock_hba *ib_host = hba->hba_ptr;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}
	ib_dev->ibd_host = ib_host;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}

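/*
 * Device setup is two-phase: iblock_allocate_virtdevice() above runs at
 * configfs mkdir time and only allocates the per-device structure, while
 * iblock_create_virtdevice() below runs once udev_path= has been set and
 * the device is enabled, at which point the backing struct block_device
 * is claimed exclusively via blkdev_get_by_path().
 */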
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	/*
	 * These settings need to be made tunable..
	 */
	ib_dev->ibd_bio_set = bioset_create(32, 64);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * has already been set before echo 1 > $HBA/$DEV/enable can run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI and TRIM
	 * in ATA; if so, report thin provisioning (TPE=1) to the initiator.
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}

static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

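/*
 * TCM core only ever sees the embedded struct se_task returned from
 * iblock_alloc_task() below; IBLOCK_REQ() recovers the containing
 * struct iblock_req from that pointer with container_of().  Sketch of
 * the assumed layout (see target_core_iblock.h for the real one):
 *
 *	struct iblock_req {
 *		struct se_task ib_task;	// handle passed to/from TCM core
 *		...			// iblock private state (bio list, counts)
 *	};
 */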
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}

static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
	struct iblock_req *ib_req;

	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ib_req) {
		pr_err("Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}

	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}

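/*
 * READ_CAPACITY reports the LBA of the last addressable block, hence the
 * "- 1" in blocks_long below.  When the exported SCSI block_size differs
 * from the backing device's logical block size, the count is rescaled by
 * shifting.  Worked example (illustrative numbers only): a 1 GiB device
 * with 512-byte hardware blocks exported at block_size=4096 shifts right
 * by 3, so 2097151 (last 512-byte LBA) becomes 262143 (last 4096-byte LBA).
 */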
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

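/*
 * Per SBC, byte 1 bit 1 of the SYNCHRONIZE_CACHE CDB is the IMMED bit,
 * which is what the (cmd->t_task_cdb[1] & 0x2) test below extracts: when
 * set, GOOD status is returned before the flush is actually issued.
 */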
/*
 * Emulate SYNCHRONIZE_CACHE_*
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	sector_t error_sector;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * blkdev_issue_flush() does not support specifying a range, so
	 * we have to flush the entire cache.
	 */
	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
	if (ret != 0) {
		pr_err("IBLOCK: block_issue_flush() failed: %d "
			" error_sector: %llu\n", ret,
			(unsigned long long)error_sector);
	}

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}

/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int iblock_emulated_dpo(struct se_device *dev)
{
	return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int iblock_emulated_fua_read(struct se_device *dev)
{
	return 0;
}

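/*
 * Submission path.  The rw flag selected below matters for data
 * integrity: WRITE_FUA issues the write with REQ_FUA set so it reaches
 * stable storage, used when we advertise no volatile write cache
 * (emulate_write_cache == 0) or the initiator set FUA in the CDB.  The
 * blk_plug around the submit loop batches the chained bios so the block
 * layer can merge and dispatch them together.
 */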
static int iblock_do_task(struct se_task *task)
{
	struct se_device *dev = task->task_se_cmd->se_dev;
	struct iblock_req *req = IBLOCK_REQ(task);
	struct bio *bio = req->ib_bio, *nbio = NULL;
	struct blk_plug plug;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     task->task_se_cmd->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	blk_start_plug(&plug);
	while (bio) {
		nbio = bio->bi_next;
		bio->bi_next = NULL;
		pr_debug("Calling submit_bio() task: %p bio: %p"
			" bio->bi_sector: %llu\n", task, bio,
			(unsigned long long)bio->bi_sector);

		submit_bio(rw, bio);
		bio = nbio;
	}
	blk_finish_plug(&plug);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

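/*
 * Backs UNMAP (and WRITE_SAME w/ UNMAP) by passing the request straight
 * through to blkdev_issue_discard().  The final argument is the discard
 * flags word (the local is still named 'barrier' for historical
 * reasons); 0 selects the default discard behaviour.
 */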
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}

static void iblock_free_task(struct se_task *task)
{
	struct iblock_req *req = IBLOCK_REQ(task);
	struct bio *bio, *hbio = req->ib_bio;
	/*
	 * We only release the bio(s) here if iblock_bio_done() has not called
	 * bio_put() -> iblock_bio_destructor().
	 */
	while (hbio != NULL) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_put(bio);
	}

	kfree(req);
}

enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

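/*
 * Option parsing for the device's configfs control file.  Hypothetical
 * shell usage (the exact configfs paths depend on the local setup):
 *
 *	echo "udev_path=/dev/sdb" > $TARGET/core/iblock_0/my_dev/control
 *	echo 1 > $TARGET/core/iblock_0/my_dev/enable
 */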
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}

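/*
 * Allocate a bio from the device-private bioset set up in
 * iblock_create_virtdevice().  Using a mempool-backed bioset rather than
 * plain bio_alloc() preserves forward progress on the I/O path under
 * memory pressure.  Each allocated bio is counted in ib_bio_cnt and the
 * count is dropped again from iblock_bio_done().
 */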
static struct bio *iblock_get_bio(
	struct se_task *task,
	struct iblock_req *ib_req,
	struct iblock_dev *ib_dev,
	int *ret,
	sector_t lba,
	u32 sg_num)
{
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
		return NULL;
	}

	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	atomic_inc(&ib_req->ib_bio_cnt);

	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}

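/*
 * Build the chained bio list for task->task_sg.  The starting LBA must
 * first be rescaled from the exported SCSI block size into the 512-byte
 * sectors the block layer uses; e.g. with block_size=4096, SCSI LBA 100
 * becomes bio->bi_sector = 100 << 3 = 800.
 */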
static int iblock_map_data_SG(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	struct scatterlist *sg;
	int ret = 0;
	u32 i, sg_num = task->task_sg_nents;
	sector_t block_lba;
	/*
	 * Do the starting conversion from a non 512-byte struct se_task
	 * SCSI blocksize into Linux/Block 512-byte units for the BIO.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
	if (!bio)
		return ret;

	ib_req->ib_bio = bio;
	hbio = tbio = bio;
	/*
	 * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
	 * from task->task_sg -> struct scatterlist memory.
	 */
	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
			sg->length, sg->offset);
again:
		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
		if (ret != sg->length) {

			pr_debug("*** Set bio->bi_sector: %llu\n",
				(unsigned long long)bio->bi_sector);
			pr_debug("** task->task_size: %u\n",
					task->task_size);
			pr_debug("*** bio->bi_max_vecs: %u\n",
					bio->bi_max_vecs);
			pr_debug("*** bio->bi_vcnt: %u\n",
					bio->bi_vcnt);

			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
						block_lba, sg_num);
			if (!bio)
				goto fail;

			tbio = tbio->bi_next = bio;
			pr_debug("-----------------> Added +1 bio: %p to"
				" list, Going to again\n", bio);
			goto again;
		}
		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
		pr_debug("task: %p bio_add_page() passed!, decremented"
			" sg_num to %u\n", task, sg_num);
		pr_debug("task: %p bio_add_page() passed!, increased lba"
				" to %llu\n", task, (unsigned long long)block_lba);
		pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
				" %u\n", task, bio->bi_vcnt);
	}

	return 0;
fail:
	while (hbio) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_put(bio);
	}
	return ret;
}

static unsigned char *iblock_get_cdb(struct se_task *task)
{
	return IBLOCK_REQ(task)->ib_scsi_cdb;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

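/*
 * Per-bio completion callback.  ib_bio_cnt was incremented once per bio
 * in iblock_get_bio(), so the task is completed only when the final
 * atomic_dec_and_test() here drops the count to zero, with GOOD status
 * only if no bio recorded an error in ib_bio_err_cnt.
 */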
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);
	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still zero
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
		bio_put(bio);
		/*
		 * Wait to complete the task until the last bio has completed.
		 */
		if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
			return;

		ibr->ib_bio = NULL;
		transport_complete_task(task, 0);
		return;
	}
	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
	/*
	 * bio_put() will call iblock_bio_destructor() to release the bio back
	 * to ibr->ib_bio_set.
	 */
	bio_put(bio);
	/*
	 * Wait to complete the task until the last bio has completed.
	 */
	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;
	/*
	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
	 */
	ibr->ib_bio = NULL;
	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}

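/*
 * The ops vector handed to the generic target core at registration time;
 * TRANSPORT_PLUGIN_VHBA_PDEV identifies this backstore as a virtual HBA
 * driving a real, pre-existing block device.
 */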
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.map_data_SG		= iblock_map_data_SG,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.dpo_emulated		= iblock_emulated_dpo,
	.fua_write_emulated	= iblock_emulated_fua_write,
	.fua_read_emulated	= iblock_emulated_fua_read,
	.write_cache_emulated	= iblock_emulated_write_cache,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_cdb		= iblock_get_cdb,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);