/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

static struct se_subsystem_api fileio_template;
/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return fd_dev;
}
104 /* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
108 static struct se_device *fd_create_virtdevice(
110 struct se_subsystem_dev *se_dev,
114 struct se_device *dev;
115 struct se_dev_limits dev_limits;
116 struct queue_limits *limits;
117 struct fd_dev *fd_dev = (struct fd_dev *) p;
118 struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
121 struct inode *inode = NULL;
122 int dev_flags = 0, flags, ret = -EINVAL;
124 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
128 dev_p = getname(fd_dev->fd_dev_name);
132 pr_err("getname(%s) failed: %lu\n",
133 fd_dev->fd_dev_name, IS_ERR(dev_p));
134 ret = PTR_ERR(dev_p);
138 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
139 * of pure timestamp updates.
141 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
143 file = filp_open(dev_p, flags, 0600);
145 pr_err("filp_open(%s) failed\n", dev_p);
149 if (!file || !file->f_dentry) {
150 pr_err("filp_open(%s) failed\n", dev_p);
153 fd_dev->fd_file = file;
155 * If using a block backend with this struct file, we extract
156 * fd_dev->fd_[block,dev]_size from struct block_device.
158 * Otherwise, we use the passed fd_size= from configfs
160 inode = file->f_mapping->host;
161 if (S_ISBLK(inode->i_mode)) {
162 struct request_queue *q;
163 unsigned long long dev_size;
165 * Setup the local scope queue_limits from struct request_queue->limits
166 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
168 q = bdev_get_queue(inode->i_bdev);
169 limits = &dev_limits.limits;
170 limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
171 limits->max_hw_sectors = queue_max_hw_sectors(q);
172 limits->max_sectors = queue_max_sectors(q);
174 * Determine the number of bytes from i_size_read() minus
175 * one (1) logical sector from underlying struct block_device
177 fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
178 dev_size = (i_size_read(file->f_mapping->host) -
179 fd_dev->fd_block_size);
181 pr_debug("FILEIO: Using size: %llu bytes from struct"
182 " block_device blocks: %llu logical_block_size: %d\n",
183 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
184 fd_dev->fd_block_size);
186 if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
187 pr_err("FILEIO: Missing fd_dev_size="
188 " parameter, and no backing struct"
193 limits = &dev_limits.limits;
194 limits->logical_block_size = FD_BLOCKSIZE;
195 limits->max_hw_sectors = FD_MAX_SECTORS;
196 limits->max_sectors = FD_MAX_SECTORS;
197 fd_dev->fd_block_size = FD_BLOCKSIZE;
200 dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
201 dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
203 dev = transport_add_device_to_core_hba(hba, &fileio_template,
204 se_dev, dev_flags, fd_dev,
205 &dev_limits, "FILEIO", FD_VERSION);
209 fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
210 fd_dev->fd_queue_depth = dev->queue_depth;
212 pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
213 " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
214 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
219 if (fd_dev->fd_file) {
220 filp_close(fd_dev->fd_file, NULL);
221 fd_dev->fd_file = NULL;
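
/*
 * Worked example of the block-device sizing above, a sketch with assumed
 * numbers only: for a backing block device of 8 GiB with 512-byte logical
 * blocks, i_size_read() returns 8589934592, so
 *
 *	dev_size = 8589934592 - 512 = 8589934080 bytes
 *	blocks   = div_u64(dev_size, 512) = 16777215
 *
 * i.e. one logical sector is subtracted, apparently so that the block count
 * derived from dev_size refers to the last addressable LBA rather than the
 * raw sector count of the underlying device.
 */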
/*	fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = (struct fd_dev *) p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}

static struct se_task *
fd_alloc_task(unsigned char *cdb)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!fd_req) {
		pr_err("Unable to allocate struct fd_request\n");
		return NULL;
	}

	return &fd_req->fd_task;
}
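
/*
 * Note on the FILE_REQ()/fd_alloc_task() pairing above: container_of() only
 * works because struct fd_request embeds its struct se_task as the fd_task
 * member. A minimal sketch of the layout being assumed (the real definition,
 * including any FILEIO-private members, lives in target_core_file.h):
 *
 *	struct fd_request {
 *		struct se_task fd_task;		// embedded se_task descriptor
 *	};
 *
 * fd_alloc_task() hands the core a pointer to the embedded fd_task, and
 * FILE_REQ() recovers the surrounding fd_request by subtracting the member
 * offset again.
 */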
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			pr_err("vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			pr_err("vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}
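
/*
 * Illustrative sketch (assumed numbers, not taken from a real task) of the
 * scatterlist -> iovec mapping performed above and in fd_do_writev() below:
 * a task carrying two 4 KiB scatterlist entries becomes
 *
 *	iov[0] = { .iov_base = sg_virt(sg0), .iov_len = 4096 }
 *	iov[1] = { .iov_base = sg_virt(sg1), .iov_len = 4096 }
 *
 * submitted as a single vfs_readv()/vfs_writev() call at byte offset
 * task_lba * block_size. The set_fs(get_ds()) dance is required because
 * vfs_readv()/vfs_writev() normally validate user-space iovec addresses;
 * widening the address limit lets these kernel-virtual addresses pass.
 */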
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}
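
/*
 * Background for the immed check above: in the SYNCHRONIZE CACHE (10) CDB
 * (opcode 0x35, per SBC), byte 1 bit 1 is the IMMED bit, hence the
 * (cmd->t_task_cdb[1] & 0x2) test. A sketch of the relevant CDB bytes:
 *
 *	CDB[0]    = 0x35	SYNCHRONIZE CACHE (10)
 *	CDB[1]    bit 1 = IMMED, bit 2 = SYNC_NV
 *	CDB[2..5] = LOGICAL BLOCK ADDRESS
 *	CDB[7..8] = NUMBER OF BLOCKS (0 = flush to end of medium)
 *
 * With IMMED set, GOOD status is returned before the flush completes;
 * otherwise completion is reported with the vfs_fsync_range() result.
 */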
static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = dev->dev_ptr;
			loff_t start = task->task_lba *
				dev->se_sub_dev->se_dev_attrib.block_size;
			loff_t end = start + task->task_size;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}
	}

	if (ret < 0) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return ret;
	}
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return 0;
}
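
/*
 * Note on SCF_FUA above: for WRITE(10)/WRITE(12)/WRITE(16), the FUA bit is
 * bit 3 of CDB byte 1 (mask 0x08); the target core sets the SCF_FUA command
 * flag checked here when that bit is present. A sketch of a WRITE(10) CDB
 * with FUA set (assumed values):
 *
 *	CDB[0] = 0x2a		WRITE (10)
 *	CDB[1] = 0x08		FUA
 *	CDB[2..5] = LBA, CDB[7..8] = TRANSFER LENGTH
 *
 * so the data for such a task is forced to stable storage via
 * vfs_fsync_range() on exactly the written byte range before completion.
 */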
/*	fd_free_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_err, NULL}
};
static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
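
/*
 * Example of the control string parsed above, as written from user space to
 * the device's configfs control attribute. The exact configfs path below is
 * illustrative only and depends on the local target configuration:
 *
 *	echo "fd_dev_name=/srv/fileio0,fd_dev_size=4294967296" > \
 *		/sys/kernel/config/target/core/fileio_0/disk0/control
 *
 * fd_dev_name= is mandatory (see fd_check_configfs_dev_params() below);
 * fd_dev_size= may be omitted when fd_dev_name points at a block device,
 * in which case the size is derived from the underlying inode instead.
 */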
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}
static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size);
	return bl;
}
/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}
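
/*
 * Worked example for fd_get_blocks(), with assumed numbers: for a plain
 * 1 GiB backing file configured via fd_dev_size=1073741824 and the default
 * 512-byte block_size device attribute,
 *
 *	fd_get_blocks() = div_u64(1073741824, 512) = 2097152
 *
 * For the S_ISBLK() case the current i_size_read() is used instead of the
 * cached fd_dev_size, so a resize of the underlying block device shows up
 * in subsequent READ CAPACITY data without reconfiguring the device.
 */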
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};
static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);