/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

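/*
 * Helper to recover the FILEIO-private struct fd_dev from the generic
 * struct se_device handed to each backend callback: struct fd_dev embeds
 * the se_device, so container_of() walks back to the enclosing structure.
 */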
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

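/*
 * Device setup happens in two steps: fd_alloc_device() only allocates the
 * per-device control structure; the backing file is not opened until
 * fd_configure_device() runs, after the configfs parameters have been
 * supplied via fd_set_configfs_dev_params() below.
 */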
static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return &fd_dev->dev;
}

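/*
 * Open the configured backing path and derive the hardware attributes.
 * A regular file is opened O_DSYNC (unless fd_buffered_io=1 was given) and
 * requires an explicit fd_dev_size=; a block device special file instead
 * reports its logical block size and current i_size.
 */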
static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs.
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		dev->dev_attrib.hw_block_size =
			bdev_logical_block_size(inode->i_bdev);
		dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);

		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
	}

	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;

	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}

static void fd_free_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

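/*
 * Common READ/WRITE path: each scatterlist entry is kmap()ed into a matching
 * struct iovec, and the whole request is submitted with a single
 * vfs_readv()/vfs_writev() at the byte offset derived from the command LBA.
 * The temporary set_fs(get_ds()) is needed because the iovecs point at
 * kernel memory rather than user space.
 */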
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents, int is_write)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *dev = FD_DEV(se_dev);
	struct file *fd = dev->fd_file;
	struct scatterlist *sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
	}

	old_fs = get_fs();
	set_fs(get_ds());

	if (is_write)
		ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
	else
		ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);

	set_fs(old_fs);

	for_each_sg(sgl, sg, sgl_nents, i)
		kunmap(sg_page(sg));

	kfree(iov);

	if (is_write) {
		if (ret < 0 || ret != cmd->data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
			if (ret < 0 || ret != cmd->data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						cmd->data_length);
				return (ret < 0 ? ret : -EINVAL);
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
				return ret;
			}
		}
	}
	return 1;
}

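/*
 * SYNCHRONIZE CACHE handler.  With the Immediate bit set, GOOD status is
 * returned before the flush is performed; the LBA/length fields select
 * either a ranged vfs_fsync_range() or a flush of the entire backing file.
 */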
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
	} else {
		ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
		/*
		 * Perform an implicit vfs_fsync_range() for fd_do_rw() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 &&
		    dev->dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = FD_DEV(dev);
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end = start + cmd->data_length;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};

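/*
 * Illustrative configuration flow (not part of this file): user space
 * typically creates a FILEIO backstore through configfs, e.g.
 *
 *   mkdir -p /sys/kernel/config/target/core/fileio_0/disk0
 *   echo "fd_dev_name=/srv/disk0.img,fd_dev_size=1073741824" > \
 *           /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 *
 * Writes to the control attribute land here and are parsed one comma- or
 * newline-separated token at a time against the match_table_t above.
 * The paths and values shown are examples only.
 */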
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
					FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC");
	return bl;
}

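/*
 * Report the number of addressable logical blocks; the target core uses
 * this callback when emulating READ CAPACITY for the device.
 */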
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size, dev->dev_attrib.block_size);
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
};

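/*
 * CDB decoding is delegated to the generic SBC library; sbc_parse_cdb()
 * selects the matching fd_sbc_ops callback above for execution.
 */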
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
};

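/*
 * Module entry points: register and release the FILEIO subsystem plugin
 * with the target core, which exposes it under
 * /sys/kernel/config/target/core/fileio_N/.
 */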
static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);