#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H

#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>

#define TARGET_CORE_MOD_VERSION         "v4.0.0-rc7-ml"
#define SHUTDOWN_SIGS   (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))

/* Used by transport_generic_allocate_iovecs() */
#define TRANSPORT_IOV_DATA_BUFFER               5
/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG              256
/*
 * By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
 *
 * Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
 * include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
 * 16-byte CDBs by default and require an extra allocation for
 * 32-byte CDBs because of legacy issues.
 *
 * Within TCM Core there are no such legacy limitations, so we go ahead
 * and use 32-byte CDBs by default and use include/scsi/scsi.h:scsi_command_size()
 * within all TCM Core and subsystem plugin code.
 */
#define TCM_MAX_COMMAND_SIZE                    32
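
/*
 * Illustrative sketch (an assumption about callers, not a quote of the core
 * code): a consumer receiving a CDB can use scsi_command_size() to decide
 * whether the fixed __t_task_cdb[] array in struct se_cmd below is large
 * enough, and fall back to a separate allocation only for oversized
 * (e.g. variable-length) CDBs:
 *
 *      if (scsi_command_size(cdb) > TCM_MAX_COMMAND_SIZE)
 *              cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
 *      else
 *              cmd->t_task_cdb = &cmd->__t_task_cdb[0];
 *      memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
 */
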
/*
 * From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
 * defined as 96, but the real limit is 252 (or 260 including the header)
 */
#define TRANSPORT_SENSE_BUFFER                  SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET                    2
#define SPC_ASC_KEY_OFFSET                      12
#define SPC_ASCQ_KEY_OFFSET                     13
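
/*
 * Minimal sketch (assumption, not core code) of how these offsets map onto
 * fixed-format sense data, with buf pointing at a zeroed buffer of
 * TRANSPORT_SENSE_BUFFER bytes and a CURRENT ERROR response code of 0x70:
 *
 *      buf[0] = 0x70;
 *      buf[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
 *      buf[SPC_ASC_KEY_OFFSET] = 0x24;
 *      buf[SPC_ASCQ_KEY_OFFSET] = 0x00;
 *
 * where 0x24/0x00 is the INVALID FIELD IN CDB additional sense code pair.
 */
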
#define TRANSPORT_IQN_LEN                       224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF                       256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF                    256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE                        128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN                          6
#define READ_CAP_LEN                            8
#define READ_POSITION_LEN                       20
#define INQUIRY_LEN                             36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN                  254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN       254

/* struct se_hba->hba_flags */
enum hba_flags_table {
        HBA_FLAGS_INTERNAL_USE  = 0x01,
        HBA_FLAGS_PSCSI_MODE    = 0x02,
};

/* struct se_lun->lun_status */
enum transport_lun_status_table {
        TRANSPORT_LUN_STATUS_FREE = 0,
        TRANSPORT_LUN_STATUS_ACTIVE = 1,
};

/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
        TRANSPORT_TPG_TYPE_NORMAL = 0,
        TRANSPORT_TPG_TYPE_DISCOVERY = 1,
};

/* Used for generate timer flags */
enum timer_flags_table {
        TF_RUNNING      = 0x01,
        TF_STOP         = 0x02,
};

/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
        TRANSPORT_NO_STATE      = 0,
        TRANSPORT_NEW_CMD       = 1,
        TRANSPORT_DEFERRED_CMD  = 2,
        TRANSPORT_WRITE_PENDING = 3,
        TRANSPORT_PROCESS_WRITE = 4,
        TRANSPORT_PROCESSING    = 5,
        TRANSPORT_COMPLETE_OK   = 6,
        TRANSPORT_COMPLETE_FAILURE = 7,
        TRANSPORT_COMPLETE_TIMEOUT = 8,
        TRANSPORT_PROCESS_TMR   = 9,
        TRANSPORT_TMR_COMPLETE  = 10,
        TRANSPORT_ISTATE_PROCESSING = 11,
        TRANSPORT_ISTATE_PROCESSED = 12,
        TRANSPORT_KILL          = 13,
        TRANSPORT_REMOVE        = 14,
        TRANSPORT_FREE          = 15,
        TRANSPORT_NEW_CMD_MAP   = 16,
        TRANSPORT_FREE_CMD_INTR = 17,
        TRANSPORT_COMPLETE_QF_WP = 18,
};

/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
        SCF_SUPPORTED_SAM_OPCODE        = 0x00000001,
        SCF_TRANSPORT_TASK_SENSE        = 0x00000002,
        SCF_EMULATED_TASK_SENSE         = 0x00000004,
        SCF_SCSI_DATA_SG_IO_CDB         = 0x00000008,
        SCF_SCSI_CONTROL_SG_IO_CDB      = 0x00000010,
        SCF_SCSI_NON_DATA_CDB           = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
        SCF_SE_CMD_FAILED               = 0x00000400,
        SCF_SE_LUN_CMD                  = 0x00000800,
        SCF_SE_ALLOW_EOO                = 0x00001000,
        SCF_SENT_CHECK_CONDITION        = 0x00004000,
        SCF_OVERFLOW_BIT                = 0x00008000,
        SCF_UNDERFLOW_BIT               = 0x00010000,
        SCF_SENT_DELAYED_TAS            = 0x00020000,
        SCF_ALUA_NON_OPTIMIZED          = 0x00040000,
        SCF_DELAYED_CMD_FROM_SAM_ATTR   = 0x00080000,
        SCF_UNUSED                      = 0x00100000,
        SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
        SCF_EMULATE_CDB_ASYNC           = 0x01000000,
        SCF_EMULATE_QUEUE_FULL          = 0x02000000,
};
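
/*
 * These are single-bit values OR'd into se_cmd->se_cmd_flags; a minimal
 * usage sketch (illustrative only, "cmd" is assumed to be a struct se_cmd
 * owned by the caller):
 *
 *      cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
 *      if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
 *              residual = cmd->residual_count;
 */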

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
        TRANSPORT_LUNFLAGS_NO_ACCESS            = 0x00,
        TRANSPORT_LUNFLAGS_INITIATOR_ACCESS     = 0x01,
        TRANSPORT_LUNFLAGS_READ_ONLY            = 0x02,
        TRANSPORT_LUNFLAGS_READ_WRITE           = 0x04,
};
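
/*
 * Usage sketch (an assumption about callers, not core code): lun_flags in
 * struct se_dev_entry carries these access bits per mapped LUN, so a
 * WRITE-class CDB arriving on a read-only mapping could be failed with:
 *
 *      if ((deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) &&
 *          (cmd->data_direction == DMA_TO_DEVICE)) {
 *              cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
 *              return -EACCES;
 *      }
 */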

/* struct se_device->dev_status */
enum transport_device_status_table {
        TRANSPORT_DEVICE_ACTIVATED              = 0x01,
        TRANSPORT_DEVICE_DEACTIVATED            = 0x02,
        TRANSPORT_DEVICE_QUEUE_FULL             = 0x04,
        TRANSPORT_DEVICE_SHUTDOWN               = 0x08,
        TRANSPORT_DEVICE_OFFLINE_ACTIVATED      = 0x10,
        TRANSPORT_DEVICE_OFFLINE_DEACTIVATED    = 0x20,
};

/*
 * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
 * to signal which ASC/ASCQ sense payload should be built.
 */
enum tcm_sense_reason_table {
        TCM_NON_EXISTENT_LUN                    = 0x01,
        TCM_UNSUPPORTED_SCSI_OPCODE             = 0x02,
        TCM_INCORRECT_AMOUNT_OF_DATA            = 0x03,
        TCM_UNEXPECTED_UNSOLICITED_DATA         = 0x04,
        TCM_SERVICE_CRC_ERROR                   = 0x05,
        TCM_SNACK_REJECTED                      = 0x06,
        TCM_SECTOR_COUNT_TOO_MANY               = 0x07,
        TCM_INVALID_CDB_FIELD                   = 0x08,
        TCM_INVALID_PARAMETER_LIST              = 0x09,
        TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE  = 0x0a,
        TCM_UNKNOWN_MODE_PAGE                   = 0x0b,
        TCM_WRITE_PROTECTED                     = 0x0c,
        TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
};
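
/*
 * Typical flow (a hedged sketch of expected usage, not a quote of the
 * core): a CDB handler that hits an error records the reason and lets
 * transport_send_check_condition_and_sense() build the CHECK CONDITION
 * status with the matching sense key and ASC/ASCQ:
 *
 *      cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 *      transport_send_check_condition_and_sense(cmd,
 *                      cmd->scsi_sense_reason, 0);
 */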

struct se_obj {
        atomic_t obj_access_count;
} ____cacheline_aligned;

/*
 * Used by TCM Core internally to signal if ALUA emulation is enabled or
 * disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
        SPC_ALUA_PASSTHROUGH,
        SPC2_ALUA_DISABLED,
        SPC3_ALUA_EMULATED
} t10_alua_index_t;

/*
 * Used by TCM Core internally to signal if SAM Task Attribute emulation
 * is enabled or disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
        SAM_TASK_ATTR_PASSTHROUGH,
        SAM_TASK_ATTR_UNTAGGED,
        SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;
195 /*
196  * Used for target SCSI statistics
197  */
198 typedef enum {
199         SCSI_INST_INDEX,
200         SCSI_DEVICE_INDEX,
201         SCSI_AUTH_INTR_INDEX,
202         SCSI_INDEX_TYPE_MAX
203 } scsi_index_t;
204
205 struct se_cmd;
206
207 struct t10_alua {
208         t10_alua_index_t alua_type;
209         /* ALUA Target Port Group ID */
210         u16     alua_tg_pt_gps_counter;
211         u32     alua_tg_pt_gps_count;
212         spinlock_t tg_pt_gps_lock;
213         struct se_subsystem_dev *t10_sub_dev;
214         /* Used for default ALUA Target Port Group */
215         struct t10_alua_tg_pt_gp *default_tg_pt_gp;
216         /* Used for default ALUA Target Port Group ConfigFS group */
217         struct config_group alua_tg_pt_gps_group;
218         int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
219         struct list_head tg_pt_gps_list;
220 } ____cacheline_aligned;
221
222 struct t10_alua_lu_gp {
223         u16     lu_gp_id;
224         int     lu_gp_valid_id;
225         u32     lu_gp_members;
226         atomic_t lu_gp_shutdown;
227         atomic_t lu_gp_ref_cnt;
228         spinlock_t lu_gp_lock;
229         struct config_group lu_gp_group;
230         struct list_head lu_gp_node;
231         struct list_head lu_gp_mem_list;
232 } ____cacheline_aligned;
233
234 struct t10_alua_lu_gp_member {
235         bool lu_gp_assoc;
236         atomic_t lu_gp_mem_ref_cnt;
237         spinlock_t lu_gp_mem_lock;
238         struct t10_alua_lu_gp *lu_gp;
239         struct se_device *lu_gp_mem_dev;
240         struct list_head lu_gp_mem_list;
241 } ____cacheline_aligned;
242
243 struct t10_alua_tg_pt_gp {
244         u16     tg_pt_gp_id;
245         int     tg_pt_gp_valid_id;
246         int     tg_pt_gp_alua_access_status;
247         int     tg_pt_gp_alua_access_type;
248         int     tg_pt_gp_nonop_delay_msecs;
249         int     tg_pt_gp_trans_delay_msecs;
250         int     tg_pt_gp_pref;
251         int     tg_pt_gp_write_metadata;
252         /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
253 #define ALUA_MD_BUF_LEN                         1024
254         u32     tg_pt_gp_md_buf_len;
255         u32     tg_pt_gp_members;
256         atomic_t tg_pt_gp_alua_access_state;
257         atomic_t tg_pt_gp_ref_cnt;
258         spinlock_t tg_pt_gp_lock;
259         struct mutex tg_pt_gp_md_mutex;
260         struct se_subsystem_dev *tg_pt_gp_su_dev;
261         struct config_group tg_pt_gp_group;
262         struct list_head tg_pt_gp_list;
263         struct list_head tg_pt_gp_mem_list;
264 } ____cacheline_aligned;
265
266 struct t10_alua_tg_pt_gp_member {
267         bool tg_pt_gp_assoc;
268         atomic_t tg_pt_gp_mem_ref_cnt;
269         spinlock_t tg_pt_gp_mem_lock;
270         struct t10_alua_tg_pt_gp *tg_pt_gp;
271         struct se_port *tg_pt;
272         struct list_head tg_pt_gp_mem_list;
273 } ____cacheline_aligned;
274
275 struct t10_vpd {
276         unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
277         int protocol_identifier_set;
278         u32 protocol_identifier;
279         u32 device_identifier_code_set;
280         u32 association;
281         u32 device_identifier_type;
282         struct list_head vpd_list;
283 } ____cacheline_aligned;
284
285 struct t10_wwn {
286         char vendor[8];
287         char model[16];
288         char revision[4];
289         char unit_serial[INQUIRY_VPD_SERIAL_LEN];
290         spinlock_t t10_vpd_lock;
291         struct se_subsystem_dev *t10_sub_dev;
292         struct config_group t10_wwn_group;
293         struct list_head t10_vpd_list;
294 } ____cacheline_aligned;
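
/*
 * Illustrative note (an assumption about consumers of struct t10_wwn, not
 * core code): vendor[8], model[16] and revision[4] line up with the
 * space-padded T10 VENDOR IDENTIFICATION, PRODUCT IDENTIFICATION and
 * PRODUCT REVISION LEVEL fields of standard INQUIRY data, so INQUIRY
 * emulation can copy them straight into the response buffer:
 *
 *      memcpy(&buf[8],  wwn->vendor,   sizeof(wwn->vendor));
 *      memcpy(&buf[16], wwn->model,    sizeof(wwn->model));
 *      memcpy(&buf[32], wwn->revision, sizeof(wwn->revision));
 */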


/*
 * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
 * emulation is enabled or disabled, or running in TCM/pSCSI passthrough mode
 */
typedef enum {
        SPC_PASSTHROUGH,
        SPC2_RESERVATIONS,
        SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;

struct t10_pr_registration {
        /* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN                         16
        /* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN                      (PR_REG_ISID_LEN + 5)
        char pr_reg_isid[PR_REG_ISID_LEN];
        /* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN                  256
        unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
        /* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN                  256
        unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
        /* For writing out live meta data */
        unsigned char *pr_aptpl_buf;
        u16 pr_aptpl_rpti;
        u16 pr_reg_tpgt;
        /* Reservation affects all target ports */
        int pr_reg_all_tg_pt;
        /* Activate Persistence across Target Power Loss */
        int pr_reg_aptpl;
        int pr_res_holder;
        int pr_res_type;
        int pr_res_scope;
        /* Used for fabric initiator WWPNs using an ISID */
        bool isid_present_at_reg;
        u32 pr_res_mapped_lun;
        u32 pr_aptpl_target_lun;
        u32 pr_res_generation;
        u64 pr_reg_bin_isid;
        u64 pr_res_key;
        atomic_t pr_res_holders;
        struct se_node_acl *pr_reg_nacl;
        struct se_dev_entry *pr_reg_deve;
        struct se_lun *pr_reg_tg_pt_lun;
        struct list_head pr_reg_list;
        struct list_head pr_reg_abort_list;
        struct list_head pr_reg_aptpl_list;
        struct list_head pr_reg_atp_list;
        struct list_head pr_reg_atp_mem_list;
} ____cacheline_aligned;

/*
 * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
 * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
 * core_setup_reservations()
 */
struct t10_reservation_ops {
        int (*t10_reservation_check)(struct se_cmd *, u32 *);
        int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
        int (*t10_pr_register)(struct se_cmd *);
        int (*t10_pr_clear)(struct se_cmd *);
};
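
/*
 * Dispatch sketch (hedged; the real call sites live in the core, this only
 * shows the indirection): once core_setup_reservations() has filled in
 * pr_ops for the chosen emulation level, callers go through the function
 * pointers rather than branching on res_type:
 *
 *      struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
 *      u32 pr_reg_type = 0;
 *
 *      if (pr_tmpl->pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0)
 *              cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
 */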

struct t10_reservation {
        /* Reservation affects all target ports */
        int pr_all_tg_pt;
        /* Activate Persistence across Target Power Loss enabled
         * for SCSI device */
        int pr_aptpl_active;
        /* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN                        8192
        u32 pr_aptpl_buf_len;
        u32 pr_generation;
        t10_reservations_index_t res_type;
        spinlock_t registration_lock;
        spinlock_t aptpl_reg_lock;
        /*
         * This will always be set by one individual I_T Nexus.
         * However with all_tg_pt=1, other I_T Nexuses from the
         * same initiator can access PR reg/res info on a different
         * target port.
         *
         * There is also the 'All Registrants' case, where there is
         * a single *pr_res_holder of the reservation, but all
         * registrations are considered reservation holders.
         */
        struct se_node_acl *pr_res_holder;
        struct list_head registration_list;
        struct list_head aptpl_reg_list;
        struct t10_reservation_ops pr_ops;
} ____cacheline_aligned;

struct se_queue_req {
        int                     state;
        struct se_cmd           *cmd;
        struct list_head        qr_list;
} ____cacheline_aligned;

struct se_queue_obj {
        atomic_t                queue_cnt;
        spinlock_t              cmd_queue_lock;
        struct list_head        qobj_list;
        wait_queue_head_t       thread_wq;
} ____cacheline_aligned;

struct se_task {
        unsigned char   task_sense;
        struct scatterlist *task_sg;
        u32             task_sg_nents;
        struct scatterlist *task_sg_bidi;
        u8              task_scsi_status;
        u8              task_flags;
        int             task_error_status;
        int             task_state_flags;
        bool            task_padded_sg;
        unsigned long long      task_lba;
        u32             task_no;
        u32             task_sectors;
        u32             task_size;
        enum dma_data_direction task_data_direction;
        struct se_cmd *task_se_cmd;
        struct se_device        *se_dev;
        struct completion       task_stop_comp;
        atomic_t        task_active;
        atomic_t        task_execute_queue;
        atomic_t        task_timeout;
        atomic_t        task_sent;
        atomic_t        task_stop;
        atomic_t        task_state_active;
        struct timer_list       task_timer;
        struct se_device *se_obj_ptr;
        struct list_head t_list;
        struct list_head t_execute_list;
        struct list_head t_state_list;
} ____cacheline_aligned;

struct se_cmd {
        /* SAM response code being sent to initiator */
        u8                      scsi_status;
        u8                      scsi_asc;
        u8                      scsi_ascq;
        u8                      scsi_sense_reason;
        u16                     scsi_sense_length;
        /* Delay for ALUA Active/NonOptimized state access in milliseconds */
        int                     alua_nonop_delay;
        /* See include/linux/dma-mapping.h */
        enum dma_data_direction data_direction;
        /* For SAM Task Attribute */
        int                     sam_task_attr;
        /* Transport protocol dependent state, see transport_state_table */
        enum transport_state_table t_state;
        /* Transport protocol dependent state for out of order CmdSNs */
        int                     deferred_t_state;
        /* Transport specific error status */
        int                     transport_error_status;
        /* See se_cmd_flags_table */
        u32                     se_cmd_flags;
        u32                     se_ordered_id;
        /* Total size in bytes associated with command */
        u32                     data_length;
        /* SCSI Presented Data Transfer Length */
        u32                     cmd_spdtl;
        u32                     residual_count;
        u32                     orig_fe_lun;
        /* Persistent Reservation key */
        u64                     pr_res_key;
        atomic_t                transport_sent;
        /* Used for sense data */
        void                    *sense_buffer;
        struct list_head        se_delayed_node;
        struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
        struct list_head        se_qf_node;
        struct se_device        *se_dev;
        struct se_dev_entry     *se_deve;
        struct se_device        *se_obj_ptr;
        struct se_device        *se_orig_obj_ptr;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
        struct se_tmr_req       *se_tmr_req;
        struct list_head        se_queue_node;
        struct target_core_fabric_ops *se_tfo;
        int (*transport_emulate_cdb)(struct se_cmd *);
        void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
        void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
        void (*transport_complete_callback)(struct se_cmd *);
        int (*transport_qf_callback)(struct se_cmd *);

        unsigned char           *t_task_cdb;
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
        int                     t_tasks_failed;
        int                     t_tasks_fua;
        bool                    t_tasks_bidi;
        u32                     t_tasks_sg_chained_no;
        atomic_t                t_fe_count;
        atomic_t                t_se_count;
        atomic_t                t_task_cdbs_left;
        atomic_t                t_task_cdbs_ex_left;
        atomic_t                t_task_cdbs_timeout_left;
        atomic_t                t_task_cdbs_sent;
        atomic_t                t_transport_aborted;
        atomic_t                t_transport_active;
        atomic_t                t_transport_complete;
        atomic_t                t_transport_queue_active;
        atomic_t                t_transport_sent;
        atomic_t                t_transport_stop;
        atomic_t                t_transport_timeout;
        atomic_t                transport_dev_active;
        atomic_t                transport_lun_active;
        atomic_t                transport_lun_fe_stop;
        atomic_t                transport_lun_stop;
        spinlock_t              t_state_lock;
        struct completion       t_transport_stop_comp;
        struct completion       transport_lun_fe_stop_comp;
        struct completion       transport_lun_stop_comp;
        struct scatterlist      *t_tasks_sg_chained;

        /*
         * Used for pre-registered fabric SGL passthrough WRITE and READ
         * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
         * and other HW target mode fabric modules.
         */
        struct scatterlist      *t_task_pt_sgl;
        u32                     t_task_pt_sgl_num;

        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
        struct scatterlist      *t_bidi_data_sg;
        unsigned int            t_bidi_data_nents;

        /* Used for BIDI READ */
        struct list_head        t_task_list;
        u32                     t_task_list_num;

} ____cacheline_aligned;

struct se_tmr_req {
        /* Task Management function to be performed */
        u8                      function;
        /* Task Management response to send */
        u8                      response;
        int                     call_transport;
        /* Reference to the ITT that the Task Mgmt should be performed upon */
        u32                     ref_task_tag;
        /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
        u64                     ref_task_lun;
        void                    *fabric_tmr_ptr;
        struct se_cmd           *task_cmd;
        struct se_cmd           *ref_cmd;
        struct se_device        *tmr_dev;
        struct se_lun           *tmr_lun;
        struct list_head        tmr_list;
} ____cacheline_aligned;

struct se_ua {
        u8                      ua_asc;
        u8                      ua_ascq;
        struct se_node_acl      *ua_nacl;
        struct list_head        ua_dev_list;
        struct list_head        ua_nacl_list;
} ____cacheline_aligned;

struct se_node_acl {
        char                    initiatorname[TRANSPORT_IQN_LEN];
        /* Used to signal demo mode created ACL, disabled by default */
        bool                    dynamic_node_acl;
        u32                     queue_depth;
        u32                     acl_index;
        u64                     num_cmds;
        u64                     read_bytes;
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                acl_pr_ref_count;
        struct se_dev_entry     *device_list;
        struct se_session       *nacl_sess;
        struct se_portal_group *se_tpg;
        spinlock_t              device_list_lock;
        spinlock_t              nacl_sess_lock;
        struct config_group     acl_group;
        struct config_group     acl_attrib_group;
        struct config_group     acl_auth_group;
        struct config_group     acl_param_group;
        struct config_group     acl_fabric_stat_group;
        struct config_group     *acl_default_groups[5];
        struct list_head        acl_list;
        struct list_head        acl_sess_list;
} ____cacheline_aligned;

struct se_session {
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
        void                    *fabric_sess_ptr;
        struct list_head        sess_list;
        struct list_head        sess_acl_list;
} ____cacheline_aligned;

struct se_device;
struct se_transform_info;
struct scatterlist;

struct se_ml_stat_grps {
        struct config_group     stat_group;
        struct config_group     scsi_auth_intr_group;
        struct config_group     scsi_att_intr_port_group;
};

struct se_lun_acl {
        char                    initiatorname[TRANSPORT_IQN_LEN];
        u32                     mapped_lun;
        struct se_node_acl      *se_lun_nacl;
        struct se_lun           *se_lun;
        struct list_head        lacl_list;
        struct config_group     se_lun_group;
        struct se_ml_stat_grps  ml_stat_grps;
}  ____cacheline_aligned;

struct se_dev_entry {
        bool                    def_pr_registered;
        /* See transport_lunflags_table */
        u32                     lun_flags;
        u32                     deve_cmds;
        u32                     mapped_lun;
        u32                     average_bytes;
        u32                     last_byte_count;
        u32                     total_cmds;
        u32                     total_bytes;
        u64                     pr_res_key;
        u64                     creation_time;
        u32                     attach_count;
        u64                     read_bytes;
        u64                     write_bytes;
        atomic_t                ua_count;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                pr_ref_count;
        struct se_lun_acl       *se_lun_acl;
        spinlock_t              ua_lock;
        struct se_lun           *se_lun;
        struct list_head        alua_port_list;
        struct list_head        ua_list;
}  ____cacheline_aligned;

struct se_dev_limits {
        /* Max supported HW queue depth */
        u32             hw_queue_depth;
        /* Max supported virtual queue depth */
        u32             queue_depth;
        /* From include/linux/blkdev.h for the other HW/SW limits. */
        struct queue_limits limits;
} ____cacheline_aligned;

struct se_dev_attrib {
        int             emulate_dpo;
        int             emulate_fua_write;
        int             emulate_fua_read;
        int             emulate_write_cache;
        int             emulate_ua_intlck_ctrl;
        int             emulate_tas;
        int             emulate_tpu;
        int             emulate_tpws;
        int             emulate_reservations;
        int             emulate_alua;
        int             enforce_pr_isids;
        int             is_nonrot;
        int             emulate_rest_reord;
        u32             hw_block_size;
        u32             block_size;
        u32             hw_max_sectors;
        u32             max_sectors;
        u32             optimal_sectors;
        u32             hw_queue_depth;
        u32             queue_depth;
        u32             task_timeout;
        u32             max_unmap_lba_count;
        u32             max_unmap_block_desc_count;
        u32             unmap_granularity;
        u32             unmap_granularity_alignment;
        struct se_subsystem_dev *da_sub_dev;
        struct config_group da_group;
} ____cacheline_aligned;

struct se_dev_stat_grps {
        struct config_group stat_group;
        struct config_group scsi_dev_group;
        struct config_group scsi_tgt_dev_group;
        struct config_group scsi_lu_group;
};
689 struct se_subsystem_dev {
690 /* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */
691 #define SE_DEV_ALIAS_LEN                512
692         unsigned char   se_dev_alias[SE_DEV_ALIAS_LEN];
693 /* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
694 #define SE_UDEV_PATH_LEN                512
695         unsigned char   se_dev_udev_path[SE_UDEV_PATH_LEN];
696         u32             su_dev_flags;
697         struct se_hba *se_dev_hba;
698         struct se_device *se_dev_ptr;
699         struct se_dev_attrib se_dev_attrib;
700         /* T10 Asymmetric Logical Unit Assignment for Target Ports */
701         struct t10_alua t10_alua;
702         /* T10 Inquiry and VPD WWN Information */
703         struct t10_wwn  t10_wwn;
704         /* T10 SPC-2 + SPC-3 Reservations */
705         struct t10_reservation t10_pr;
706         spinlock_t      se_dev_lock;
707         void            *se_dev_su_ptr;
708         struct list_head se_dev_node;
709         struct config_group se_dev_group;
710         /* For T10 Reservations */
711         struct config_group se_dev_pr_group;
712         /* For target_core_stat.c groups */
713         struct se_dev_stat_grps dev_stat_grps;
714 } ____cacheline_aligned;
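
/*
 * The "must be less than PAGE_SIZE" notes above follow from these strings
 * being exposed as configfs attributes, whose show() methods format into a
 * single page.  A store() sketch for such an attribute (illustrative only;
 * the "page", "count" and "su_dev" names are assumptions) would bound-check
 * accordingly:
 *
 *      if (count >= SE_UDEV_PATH_LEN)
 *              return -EINVAL;
 *      snprintf(su_dev->se_dev_udev_path, SE_UDEV_PATH_LEN, "%s", page);
 */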

struct se_device {
        /* Set to 1 if thread is NOT sleeping on thread_sem */
        u8                      thread_active;
        u8                      dev_status_timer_flags;
        /* RELATIVE TARGET PORT IDENTIFIER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
        u32                     dev_cur_ordered_id;
        u32                     dev_flags;
        u32                     dev_port_count;
        /* See transport_device_status_table */
        u32                     dev_status;
        u32                     dev_tcq_window_closed;
        /* Physical device queue depth */
        u32                     queue_depth;
        /* Used for SPC-2 reservations enforcement of ISIDs */
        u64                     dev_res_bin_isid;
        t10_task_attr_index_t   dev_task_attr_type;
        /* Pointer to transport specific device structure */
        void                    *dev_ptr;
        u32                     dev_index;
        u64                     creation_time;
        u32                     num_resets;
        u64                     num_cmds;
        u64                     read_bytes;
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
        atomic_t                active_cmds;
        atomic_t                simple_cmds;
        atomic_t                depth_left;
        atomic_t                dev_ordered_id;
        atomic_t                dev_tur_active;
        atomic_t                execute_tasks;
        atomic_t                dev_status_thr_count;
        atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
        struct se_obj           dev_access_obj;
        struct se_obj           dev_export_obj;
        struct se_queue_obj     dev_queue_obj;
        spinlock_t              delayed_cmd_lock;
        spinlock_t              ordered_cmd_lock;
        spinlock_t              execute_task_lock;
        spinlock_t              state_task_lock;
        spinlock_t              dev_alua_lock;
        spinlock_t              dev_reservation_lock;
        spinlock_t              dev_state_lock;
        spinlock_t              dev_status_lock;
        spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
        /* Used for legacy SPC-2 reservations */
        struct se_node_acl      *dev_reserved_node_acl;
        /* Used for ALUA Logical Unit Group membership */
        struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
        /* Used for SPC-3 Persistent Reservations */
        struct t10_pr_registration *dev_pr_res_holder;
        struct list_head        dev_sep_list;
        struct list_head        dev_tmr_list;
        struct timer_list       dev_status_timer;
        /* Pointer to descriptor for processing thread */
        struct task_struct      *process_thread;
        pid_t                   process_thread_pid;
        struct task_struct      *dev_mgmt_thread;
        struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
        struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
        struct list_head        qf_cmd_list;
        /* Pointer to associated SE HBA */
        struct se_hba           *se_hba;
        struct se_subsystem_dev *se_sub_dev;
        /* Pointer to template of function pointers for transport */
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba's struct se_device list */
        struct list_head        dev_list;
        /* Linked list for struct se_global->g_se_dev_list */
        struct list_head        g_se_dev_list;
}  ____cacheline_aligned;

struct se_hba {
        u16                     hba_tpgt;
        u32                     hba_id;
        /* See hba_flags_table */
        u32                     hba_flags;
        /* Virtual iSCSI devices attached. */
        u32                     dev_count;
        u32                     hba_index;
        /* Pointer to transport specific host structure. */
        void                    *hba_ptr;
        /* Linked list for struct se_device */
        struct list_head        hba_dev_list;
        struct list_head        hba_node;
        spinlock_t              device_lock;
        struct config_group     hba_group;
        struct mutex            hba_access_mutex;
        struct se_subsystem_api *transport;
}  ____cacheline_aligned;

struct se_port_stat_grps {
        struct config_group stat_group;
        struct config_group scsi_port_group;
        struct config_group scsi_tgt_port_group;
        struct config_group scsi_transport_group;
};

struct se_lun {
        /* See transport_lun_status_table */
        enum transport_lun_status_table lun_status;
        u32                     lun_access;
        u32                     lun_flags;
        u32                     unpacked_lun;
        atomic_t                lun_acl_count;
        spinlock_t              lun_acl_lock;
        spinlock_t              lun_cmd_lock;
        spinlock_t              lun_sep_lock;
        struct completion       lun_shutdown_comp;
        struct list_head        lun_cmd_list;
        struct list_head        lun_acl_list;
        struct se_device        *lun_se_dev;
        struct se_port          *lun_sep;
        struct config_group     lun_group;
        struct se_port_stat_grps port_stat_grps;
} ____cacheline_aligned;

struct scsi_port_stats {
        u64     cmd_pdus;
        u64     tx_data_octets;
        u64     rx_data_octets;
} ____cacheline_aligned;

struct se_port {
        /* RELATIVE TARGET PORT IDENTIFIER */
        u16             sep_rtpi;
        int             sep_tg_pt_secondary_stat;
        int             sep_tg_pt_secondary_write_md;
        u32             sep_index;
        struct scsi_port_stats sep_stats;
        /* Used for ALUA Target Port Groups membership */
        atomic_t        sep_tg_pt_gp_active;
        atomic_t        sep_tg_pt_secondary_offline;
        /* Used for PR ALL_TG_PT=1 */
        atomic_t        sep_tg_pt_ref_cnt;
        spinlock_t      sep_alua_lock;
        struct mutex    sep_tg_pt_md_mutex;
        struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
        struct se_lun *sep_lun;
        struct se_portal_group *sep_tpg;
        struct list_head sep_alua_list;
        struct list_head sep_list;
} ____cacheline_aligned;

struct se_tpg_np {
        struct se_portal_group *tpg_np_parent;
        struct config_group     tpg_np_group;
} ____cacheline_aligned;

struct se_portal_group {
        /* Type of target portal group, see transport_tpg_type_table */
        enum transport_tpg_type_table se_tpg_type;
        /* Number of ACLed Initiator Nodes for this TPG */
        u32                     num_node_acls;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                tpg_pr_ref_count;
        /* Spinlock for adding/removing ACLed Nodes */
        spinlock_t              acl_node_lock;
        /* Spinlock for adding/removing sessions */
        spinlock_t              session_lock;
        spinlock_t              tpg_lun_lock;
        /* Pointer to $FABRIC_MOD portal group */
        void                    *se_tpg_fabric_ptr;
        struct list_head        se_tpg_node;
        /* Linked list for initiator ACL list */
        struct list_head        acl_node_list;
        struct se_lun           *tpg_lun_list;
        struct se_lun           tpg_virt_lun0;
        /* List of TCM sessions associated with this TPG */
        struct list_head        tpg_sess_list;
        /* Pointer to $FABRIC_MOD dependent code */
        struct target_core_fabric_ops *se_tpg_tfo;
        struct se_wwn           *se_tpg_wwn;
        struct config_group     tpg_group;
        struct config_group     *tpg_default_groups[6];
        struct config_group     tpg_lun_group;
        struct config_group     tpg_np_group;
        struct config_group     tpg_acl_group;
        struct config_group     tpg_attrib_group;
        struct config_group     tpg_param_group;
} ____cacheline_aligned;

struct se_wwn {
        struct target_fabric_configfs *wwn_tf;
        struct config_group     wwn_group;
        struct config_group     *wwn_default_groups[2];
        struct config_group     fabric_stat_group;
} ____cacheline_aligned;

#endif /* TARGET_CORE_BASE_H */