/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

static struct se_subsystem_api fileio_template;

/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Allocate a struct fd_host for this HBA and attach it via
 *	hba->hba_ptr.
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);

	return 0;
}

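/*	fd_detach_hba(): (Part of se_subsystem_api_t template)
 *
 *	Release the struct fd_host allocated in fd_attach_hba() and
 *	clear hba->hba_ptr.
 */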
static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

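/*	fd_allocate_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *	Allocate the FILEIO private struct fd_dev.  The backing file is
 *	not opened until fd_create_virtdevice() runs.
 */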
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return fd_dev;
}

/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *	Open the backing file or block device and register the new
 *	struct se_device with the target core.
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = (struct fd_dev *) p;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags, ret = -EINVAL;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		pr_err("getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		/*
		 * Return directly: dev_p is an ERR_PTR here and must not
		 * be passed to putname() in the fail: path below.
		 */
		return ERR_CAST(dev_p);
	}
#if 0
	if (di->no_create_file)
		flags = O_RDWR | O_LARGEFILE;
	else
		flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
	flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/*	flags |= O_DIRECT; */
	/*
	 * If fd_buffered_io=1 has not been set explicitly (the default),
	 * use O_SYNC to force FILEIO writes to disk.
	 */
	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
		flags |= O_SYNC;

	file = filp_open(dev_p, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", dev_p);
		ret = PTR_ERR(file);
		goto fail;
	}
	if (!file || !file->f_dentry) {
		pr_err("filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs.
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		/*
		 * Set up the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			fd_dev->fd_dev_size,
			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!dev)
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	putname(dev_p);
	return ERR_PTR(ret);
}

/*	fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *	Close the backing struct file and free the struct fd_dev.
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = (struct fd_dev *) p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

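/*
 * Convert a generic struct se_task into the FILEIO private
 * struct fd_request that embeds it.
 */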
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}

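/*	fd_alloc_task(): (Part of se_subsystem_api_t template)
 *
 *	Allocate a struct fd_request and hand its embedded struct se_task
 *	back to the target core.
 */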
static struct se_task *
fd_alloc_task(unsigned char *cdb)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!fd_req) {
		pr_err("Unable to allocate struct fd_request\n");
		return NULL;
	}

	return &fd_req->fd_task;
}

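/*
 * Map the task's scatterlist into a kernel iovec[] and issue a single
 * positioned vfs_readv() at the byte offset of the starting LBA.  The
 * set_fs(get_ds()) dance is needed so vfs_readv() accepts kernel-space
 * iovec buffers.
 */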
static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_nents; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			pr_err("vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return (ret < 0 ? ret : -EINVAL);
		}
	} else {
		if (ret < 0) {
			pr_err("vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return ret;
		}
	}

	return 1;
}

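/*
 * Write-side counterpart of fd_do_readv().  Unlike a short read on a
 * regular file, a short write is always treated as an error.
 */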
static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
	struct file *fd = dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba *
		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
	int ret, i = 0;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
	if (!iov) {
		pr_err("Unable to allocate fd_do_writev iov[]\n");
		return -ENOMEM;
	}

	for (i = 0; i < task->task_sg_nents; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		pr_err("vfs_writev() returned %d\n", ret);
		return (ret < 0 ? ret : -EINVAL);
	}

	return 1;
}

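/*
 * Emulate SYNCHRONIZE_CACHE with vfs_fsync_range().  If the IMMED bit
 * is set, GOOD status is queued before the flush is issued; otherwise
 * the completion status reflects the result of the flush.
 */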
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}

/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int fd_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_dpo(struct se_device *dev)
{
	return 0;
}
/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_fua_read(struct se_device *dev)
{
	return 0;
}

/*
 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 * LBA range basis.
 */
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
	loff_t end = start + task->task_size;
	int ret;

	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
			task->task_lba, task->task_size);

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}

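/*	fd_do_task(): (Part of se_subsystem_api_t template)
 *
 *	Dispatch a READ or WRITE to the vectored fileio helpers above,
 *	applying FUA WRITE emulation when the command requests it.
 */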
static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);

		if (ret > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
		    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		    cmd->t_tasks_fua) {
			/*
			 * We might need to be a bit smarter here
			 * and return some sense data to let the initiator
			 * know the FUA WRITE cache sync failed?
			 */
			fd_emulate_write_fua(cmd, task);
		}
	}

	if (ret < 0)
		return ret;
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	fd_free_task(): (Part of se_subsystem_api_t template)
 *
 *	Free the struct fd_request containing the passed struct se_task.
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}

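/*
 * Device attributes accepted through the configfs control file.  A
 * typical invocation from userspace looks like the following (the
 * paths and sizes here are illustrative only):
 *
 *	echo "fd_dev_name=/srv/disk0.img,fd_dev_size=4294967296" > \
 *		/sys/kernel/config/target/core/fileio_0/disk0/control
 */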
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};

static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

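/*
 * Refuse device creation until a mandatory fd_dev_name= has been
 * provided via fd_set_configfs_dev_params().
 */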
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	return 0;
}

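/*
 * Report the current FILEIO device parameters back through configfs.
 */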
static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
		"Buffered" : "Synchronous");
	return bl;
}

/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *	Return the CDB storage embedded in struct fd_request.
 */
static unsigned char *fd_get_cdb(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	return req->fd_scsi_cdb;
}

/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *	Report the emulated SCSI revision level for this backend.
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *	FILEIO devices are always presented as TYPE_DISK.
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

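/*	fd_get_blocks(): (Part of se_subsystem_api_t template)
 *
 *	Report device capacity as the number of logical blocks covered
 *	by fd_dev_size.
 */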
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
			dev->se_sub_dev->se_dev_attrib.block_size);

	return blocks_long;
}

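/*
 * se_subsystem_api callback table wiring the FILEIO backend into the
 * generic target core.
 */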
static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.dpo_emulated		= fd_emulated_dpo,
	.fua_write_emulated	= fd_emulated_fua_write,
	.fua_read_emulated	= fd_emulated_fua_read,
	.write_cache_emulated	= fd_emulated_write_cache,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_cdb		= fd_get_cdb,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};

static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);