/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

static struct se_subsystem_api fileio_template;

/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate a struct fd_host and attach it as hba->hba_ptr.
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}

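/*	fd_detach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Release the struct fd_host allocated by fd_attach_hba().
 */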
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

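/*	fd_allocate_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *	Allocate the FILEIO-private struct fd_dev for a new backstore.
 *	The backing struct file is not opened until fd_create_virtdevice().
 */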
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return fd_dev;
}

/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *	Open the backing struct file and register the new struct se_device
 *	with the target core.
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = (struct fd_dev *) p;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags, ret = -EINVAL;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		pr_err("getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		ret = PTR_ERR(dev_p);
		goto fail;
	}
	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", dev_p);
		ret = PTR_ERR(file);
		goto fail;
	}
	if (!file || !file->f_dentry) {
		pr_err("filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		unsigned long long dev_size;
		/*
		 * Setup the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!dev)
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	if (!IS_ERR_OR_NULL(dev_p))
		putname(dev_p);
	return ERR_PTR(ret);
}

/*	fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *	Close the backing struct file, if any, and release struct fd_dev.
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = (struct fd_dev *) p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

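/*
 * Translate a generic struct se_task into the FILEIO-private
 * struct fd_request that embeds it.
 */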
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}

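/*	fd_alloc_task(): (Part of se_subsystem_api_t template)
 *
 *	Allocate a struct fd_request and hand back its embedded se_task.
 */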
static struct se_task *
fd_alloc_task(unsigned char *cdb)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!fd_req) {
		pr_err("Unable to allocate struct fd_request\n");
		return NULL;
	}

	return &fd_req->fd_task;
}

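/*
 * Map the task's scatterlist entries into a kernel iovec and read from
 * the backing struct file with vfs_readv() at the LBA-derived offset.
 */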
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			pr_err("vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			pr_err("vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}

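/*
 * Map the task's scatterlist entries into a kernel iovec and write to
 * the backing struct file with vfs_writev() at the LBA-derived offset.
 */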
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
	struct fd_dev *dev = se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}

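/*
 * Emulate SYNCHRONIZE_CACHE by flushing the requested byte range (or
 * the entire backing file) with vfs_fsync_range().
 */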
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}

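/*	fd_do_task(): (Part of se_subsystem_api_t template)
 *
 *	Dispatch a READ or WRITE task to the vectored fileio helpers above.
 */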
static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    (cmd->se_cmd_flags & SCF_FUA)) {
			struct fd_dev *fd_dev = dev->dev_ptr;
			loff_t start = task->task_lba *
				dev->se_sub_dev->se_dev_attrib.block_size;
			loff_t end = start + task->task_size;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}
	}

	if (ret < 0) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return ret;
	}
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return 0;
}

/*	fd_free_task(): (Part of se_subsystem_api_t template)
 *
 *	Release the struct fd_request embedding the completed se_task.
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_err, NULL}
};

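/*	fd_set_configfs_dev_params(): (Part of se_subsystem_api_t template)
 *
 *	Parse the comma-separated fd_dev_name= and fd_dev_size= options
 *	written to the backstore's configfs control attribute.  A sketch
 *	of the expected usage, assuming the standard target configfs
 *	layout and a hypothetical fileio_0/fdev0 backstore:
 *
 *	  echo "fd_dev_name=/tmp/fdev0.img,fd_dev_size=4194304" > \
 *	    /sys/kernel/config/target/core/fileio_0/fdev0/control
 */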
static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}

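/*	fd_show_configfs_dev_params(): (Part of se_subsystem_api_t template)
 *
 *	Emit the current device ID, backing path, size and mode for the
 *	configfs info attribute.
 */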
static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: O_DSYNC\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size);
	return bl;
}

/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *	Report the emulated SCSI revision level for this backstore.
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *	FILEIO backstores always report themselves as TYPE_DISK.
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

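/*	fd_get_blocks(): (Part of se_subsystem_api_t template)
 *
 *	Report the number of logical blocks exported to the initiator.
 */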
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = (i_size_read(i) - fd_dev->fd_block_size);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}

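/*
 * se_subsystem_api template wiring the fd_*() handlers above into the
 * generic target core; registered at module load below.
 */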
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};

static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);