X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Ftarget%2Ftarget_core_file.c;h=ea29eaf23789d9d20cb8846026b916c8b1ae9456;hb=816d7d34185291d996c041adf0b9ab58bf5a73d7;hp=b4864fba4ef0d511758916a8debac60ee9f43674;hpb=1cac8e884fbea0b10ee5692cd97e456b58c34ce5;p=pandora-kernel.git

diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b4864fba4ef0..ea29eaf23789 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -134,21 +134,24 @@ static struct se_device *fd_create_virtdevice(
 		ret = PTR_ERR(dev_p);
 		goto fail;
 	}
-#if 0
-	if (di->no_create_file)
-		flags = O_RDWR | O_LARGEFILE;
-	else
-		flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#else
-	flags = O_RDWR | O_CREAT | O_LARGEFILE;
-#endif
-/*	flags |= O_DIRECT; */
 	/*
-	 * If fd_buffered_io=1 has not been set explicitly (the default),
-	 * use O_SYNC to force FILEIO writes to disk.
+	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+	 * of pure timestamp updates.
+	 */
+	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+	/*
+	 * Optionally allow fd_buffered_io=1 to be enabled for people
+	 * who want use the fs buffer cache as an WriteCache mechanism.
+	 *
+	 * This means that in event of a hard failure, there is a risk
+	 * of silent data-loss if the SCSI client has *not* performed a
+	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
+	 * to write-out the entire device cache.
 	 */
-	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
-		flags |= O_SYNC;
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
+		flags &= ~O_DSYNC;
+	}
 
 	file = filp_open(dev_p, flags, 0600);
 	if (IS_ERR(file)) {
@@ -170,6 +173,7 @@ static struct se_device *fd_create_virtdevice(
 	inode = file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
 		struct request_queue *q;
+		unsigned long long dev_size;
 		/*
 		 * Setup the local scope queue_limits from struct request_queue->limits
 		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
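The first hunk above replaces the old O_SYNC default with O_DSYNC and turns fd_buffered_io=1 from an opt-out of O_SYNC into an explicit write-cache opt-in. A minimal userspace sketch of that flag policy follows; the helper open_backing_store(), its bool knob, and the scratch path are illustrative assumptions, not part of the patch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Open a FILEIO-style backing store. O_DSYNC is the default, matching
 * the patch; the buffered_wce knob models FDBD_HAS_BUFFERED_IO_WCE. */
static int open_backing_store(const char *path, bool buffered_wce)
{
	int flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	if (buffered_wce) {
		/* Writes now ride the page cache: data can be lost on a
		 * hard failure unless the client later forces it out
		 * (FUA write or SYNCHRONIZE_CACHE). */
		flags &= ~O_DSYNC;
	}
	return open(path, flags, 0600);
}

int main(void)
{
	int fd = open_backing_store("/tmp/fileio_backend.img", false);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* With O_DSYNC set, write(2) returns only once the data and the
	 * metadata needed to read it back are stable; unlike O_SYNC it
	 * does not also flush pure timestamp updates. */
	if (write(fd, "x", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

That timestamp distinction is exactly what the new comment in the hunk is pointing at when it says O_DSYNC forgoes syncing of pure timestamp updates.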
@@ -184,13 +188,12 @@ static struct se_device *fd_create_virtdevice(
 		 * one (1) logical sector from underlying struct block_device
 		 */
 		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
-		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
+		dev_size = (i_size_read(file->f_mapping->host) -
 				       fd_dev->fd_block_size);
 
 		pr_debug("FILEIO: Using size: %llu bytes from struct"
 			" block_device blocks: %llu logical_block_size: %d\n",
-			fd_dev->fd_dev_size,
-			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
+			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
 	} else {
 		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -216,6 +219,12 @@ static struct se_device *fd_create_virtdevice(
 	if (!dev)
 		goto fail;
 
+	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
+		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
+			" with FDBD_HAS_BUFFERED_IO_WCE\n");
+		dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+	}
+
 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
 	fd_dev->fd_queue_depth = dev->queue_depth;
 
@@ -291,7 +300,7 @@ static int fd_do_readv(struct se_task *task)
 
 	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
 		iov[i].iov_len = sg->length;
-		iov[i].iov_base = sg_virt(sg);
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
 	}
 
 	old_fs = get_fs();
@@ -299,6 +308,8 @@ static int fd_do_readv(struct se_task *task)
 	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
 	set_fs(old_fs);
 
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
+		kunmap(sg_page(sg));
 	kfree(iov);
 	/*
 	 * Return zeros and GOOD status even if the READ did not return
@@ -344,7 +355,7 @@ static int fd_do_writev(struct se_task *task)
 
 	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
 		iov[i].iov_len = sg->length;
-		iov[i].iov_base = sg_virt(sg);
+		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
 	}
 
 	old_fs = get_fs();
@@ -352,6 +363,9 @@ static int fd_do_writev(struct se_task *task)
 	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
 	set_fs(old_fs);
 
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
+		kunmap(sg_page(sg));
+
 	kfree(iov);
 
 	if (ret < 0 || ret != task->task_size) {
@@ -400,26 +414,6 @@ static void fd_emulate_sync_cache(struct se_task *task)
 	transport_complete_sync_cache(cmd, ret == 0);
 }
 
-/*
- * WRITE Force Unit Access (FUA) emulation on a per struct se_task
- * LBA range basis..
- */
-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
-{
-	struct se_device *dev = cmd->se_dev;
-	struct fd_dev *fd_dev = dev->dev_ptr;
-	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
-	loff_t end = start + task->task_size;
-	int ret;
-
-	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
-		task->task_lba, task->task_size);
-
-	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
-	if (ret != 0)
-		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
 static int fd_do_task(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
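In the fd_do_readv()/fd_do_writev() hunks above, every scatterlist segment becomes one iovec entry, now obtained via kmap(sg_page(sg)) + sg->offset (with a matching kunmap() after the call) rather than sg_virt(), so segments backed by highmem pages are handled correctly; the whole array then goes to a single vfs_readv()/vfs_writev(), and fd_do_writev() fails the task on any short transfer. kmap() has no userspace analogue, but the gather-then-write pattern can be sketched with pwritev(2); struct seg, do_writev_task(), and the scratch path below are illustrative names only:

#define _GNU_SOURCE		/* for pwritev() */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

struct seg {			/* stand-in for one scatterlist entry */
	void *buf;
	size_t len;
};

/* Gather all segments into an iovec array, then issue one positioned
 * vectored write, as fd_do_writev() does with vfs_writev(). */
static int do_writev_task(int fd, off_t dev_off, const struct seg *sg,
			  int nents, size_t task_size)
{
	struct iovec *iov = calloc(nents, sizeof(struct iovec));
	ssize_t ret;
	int i;

	if (!iov)
		return -1;
	for (i = 0; i < nents; i++) {	/* mirrors the for_each_sg() loop */
		iov[i].iov_base = sg[i].buf;
		iov[i].iov_len = sg[i].len;
	}
	ret = pwritev(fd, iov, nents, dev_off);
	free(iov);
	/* Like the driver, treat a short write as a hard failure. */
	return (ret < 0 || (size_t)ret != task_size) ? -1 : 0;
}

int main(void)
{
	char a[4] = "abcd", b[4] = "efgh";
	struct seg sg[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	int fd = open("/tmp/fileio_backend.img", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || do_writev_task(fd, 0, sg, 2, sizeof(a) + sizeof(b)))
		fprintf(stderr, "vectored write failed\n");
	if (fd >= 0)
		close(fd);
	return 0;
}

As in the driver, the iovec array is allocated per call with one entry per segment, and a short transfer is reported as an error rather than retried.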
@@ -434,19 +428,21 @@ static int fd_do_task(struct se_task *task)
 		ret = fd_do_readv(task);
 	} else {
 		ret = fd_do_writev(task);
-
+		/*
+		 * Perform implict vfs_fsync_range() for fd_do_writev() ops
+		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
+		 * Allow this to happen independent of WCE=0 setting.
+		 */
 		if (ret > 0 &&
-		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
 		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
 		    (cmd->se_cmd_flags & SCF_FUA)) {
-			/*
-			 * We might need to be a bit smarter here
-			 * and return some sense data to let the initiator
-			 * know the FUA WRITE cache sync failed..?
-			 */
-			fd_emulate_write_fua(cmd, task);
-		}
+			struct fd_dev *fd_dev = dev->dev_ptr;
+			loff_t start = task->task_lba *
+				dev->se_sub_dev->se_dev_attrib.block_size;
+			loff_t end = start + task->task_size;
 
+			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+		}
 	}
 
 	if (ret < 0) {
@@ -545,7 +541,7 @@ static ssize_t fd_set_configfs_dev_params(
 			pr_debug("FILEIO: Using buffered I/O"
 				" operations for struct fd_dev\n");
 
-			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
 			break;
 		default:
 			break;
@@ -580,8 +576,8 @@ static ssize_t fd_show_configfs_dev_params(
 	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
 	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
 		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
-		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
-		"Buffered" : "Synchronous");
+		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
+		"Buffered-WCE" : "O_DSYNC");
 	return bl;
 }
 
@@ -606,10 +602,20 @@ static u32 fd_get_device_type(struct se_device *dev)
 static sector_t fd_get_blocks(struct se_device *dev)
 {
 	struct fd_dev *fd_dev = dev->dev_ptr;
-	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
-			dev->se_sub_dev->se_dev_attrib.block_size);
+	struct file *f = fd_dev->fd_file;
+	struct inode *i = f->f_mapping->host;
+	unsigned long long dev_size;
+	/*
+	 * When using a file that references an underlying struct block_device,
+	 * ensure dev_size is always based on the current inode size in order
+	 * to handle underlying block_device resize operations.
+	 */
+	if (S_ISBLK(i->i_mode))
+		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
+	else
+		dev_size = fd_dev->fd_dev_size;
 
-	return blocks_long;
+	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
 }
 
 static struct se_subsystem_api fileio_template = {
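The final hunk makes fd_get_blocks() recompute the exported capacity from the backing inode on every call when the backend is a block device, so an underlying resize is reflected the next time the initiator asks, instead of serving the size cached at configuration time. A hedged userspace sketch of the same decision, with fstat(2) plus the BLKGETSIZE64 ioctl standing in for i_size_read(); get_blocks(), its parameters, and the default /dev/sda path are assumptions for illustration:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <linux/fs.h>		/* BLKGETSIZE64 */
#include <unistd.h>

/* Report the backing store's block count the way the reworked
 * fd_get_blocks() does: for a block device, query the current size on
 * every call so a resize is observed; for a regular file, trust the
 * configured (cached) size. */
static uint64_t get_blocks(int fd, uint64_t cached_file_size,
			   uint32_t block_size)
{
	struct stat st;
	uint64_t dev_size;

	if (fstat(fd, &st) < 0)
		return 0;
	if (S_ISBLK(st.st_mode)) {
		if (ioctl(fd, BLKGETSIZE64, &dev_size) < 0)
			return 0;
		/* The driver likewise subtracts one logical block. */
		dev_size -= block_size;
	} else {
		dev_size = cached_file_size;
	}
	return dev_size / block_size;
}

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "/dev/sda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("blocks: %llu\n",
	       (unsigned long long)get_blocks(fd, 0, 512));
	close(fd);
	return 0;
}

The one-block subtraction mirrors the driver's existing convention, visible in the @@ -184 hunk, of trimming one logical sector from a block-device backend's i_size before computing the block count.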