dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
case VERIFY_16: /* SBC - VRProtect */
case WRITE_VERIFY: /* SBC - VRProtect */
case WRITE_VERIFY_12: /* SBC - VRProtect */
+ case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
break;
default:
cdb[1] &= 0x1f; /* clear logical unit number */
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
+ case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
* to allow the passed struct se_cmd list of tasks to be added to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
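
For context, a minimal userspace model of the task-attribute dispatch this hunk simplifies (names are illustrative, not the kernel's): HEAD OF QUEUE commands report 1 so the caller can push them to the front, while ORDERED commands now only bump the per-device sync counter since the ordered_cmd_list bookkeeping is gone.

    /* Illustrative stand-ins for the SAM task attribute handling above. */
    enum task_attr { ATTR_SIMPLE, ATTR_HEAD_OF_QUEUE, ATTR_ORDERED };

    struct model_dev { int dev_ordered_sync; };

    /* Returns 1 when the command should jump to the head of the queue. */
    static int check_task_attr(struct model_dev *dev, enum task_attr attr)
    {
        switch (attr) {
        case ATTR_HEAD_OF_QUEUE:
            return 1;                 /* execute ahead of queued commands */
        case ATTR_ORDERED:
            dev->dev_ordered_sync++;  /* barrier against later SIMPLE cmds */
            return 0;
        default:
            return 0;                 /* SIMPLE: normal queueing */
        }
    }
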
/*
* Everything else assumes TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
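
The `? :` above is the GCC elvis operator; per the SBC-3 wording quoted in the comment, a zero TRANSFER LENGTH in a 6-byte CDB means 256 blocks. A portable standalone equivalent, for illustration:

    #include <stdint.h>

    /* 6-byte R/W CDBs: byte 4 is TRANSFER LENGTH, and 0 means 256 blocks. */
    static uint32_t sectors_from_cdb6(const uint8_t *cdb)
    {
        return cdb[4] ? cdb[4] : 256;
    }
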
static inline u32 transport_get_sectors_10(
cmd, cdb, pr_reg_type) != 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[10] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
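
As a reminder of what transport_xor_callback does after I/O completion: XDWRITEREAD returns to the initiator the XOR of the data read from the medium and the newly written data. The core of that step reduces to a sketch like this (names hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    /* Model of the post-I/O XOR step: combine the read-back data with
     * the write payload into the BIDI read buffer sent back. */
    static void xor_buffers(uint8_t *xor_out, const uint8_t *read_data,
                            const uint8_t *write_data, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            xor_out[i] = read_data[i] ^ write_data[i];
    }
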
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (target_check_write_same_discard(&cdb[10], dev) < 0)
- goto out_invalid_cdb_field;
+ goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_task = target_emulate_write_same;
break;
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
- if (cdb[1] == MI_REPORT_TARGET_PGS &&
+ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_task =
target_emulate_report_target_port_groups;
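
The masking fix matters because in MAINTENANCE IN CDBs the SERVICE ACTION occupies only bits 4:0 of byte 1; comparing the whole byte misfires whenever the reserved upper bits happen to be set. A standalone decoder under the same assumption (0x0a is the REPORT TARGET PORT GROUPS service action value):

    #include <stdint.h>

    #define MI_REPORT_TARGET_PGS_SA 0x0a /* SPC: SA value for RTPG */

    /* SERVICE ACTION is the low 5 bits of byte 1 for MAINTENANCE IN. */
    static int is_report_target_pgs(const uint8_t *cdb)
    {
        return (cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS_SA;
    }
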
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_invalid_cdb_field;
+ goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_task = target_emulate_write_same;
break;
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_invalid_cdb_field;
+ goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_task = target_emulate_write_same;
break;
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
}
-
+ /*
+ * For the overflow case keep the existing fabric provided
+ * ->data_length. Otherwise for the underflow case, reset
+ * ->data_length to the smaller SCSI expected data transfer
+ * length.
+ */
if (size > cmd->data_length) {
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
cmd->residual_count = (size - cmd->data_length);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - size);
+ cmd->data_length = size;
}
- cmd->data_length = size;
}
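
The residual logic now treats the two directions asymmetrically: overflow keeps the fabric-provided ->data_length (the transport cannot move more than the fabric set up) and reports the excess as residual, while underflow shrinks ->data_length to the CDB-derived size. A compact model of that rule, with illustrative names:

    #include <stdint.h>

    struct xfer {
        uint32_t data_length;     /* fabric-provided expected length */
        uint32_t residual_count;
        int      overflow;        /* 1 = overflow, 0 = underflow */
    };

    /* cdb_size is the transfer length derived from the CDB. */
    static void set_residual(struct xfer *x, uint32_t cdb_size)
    {
        if (cdb_size > x->data_length) {
            x->overflow = 1;
            x->residual_count = cdb_size - x->data_length;
            /* keep data_length: only move what the fabric allows */
        } else {
            x->overflow = 0;
            x->residual_count = x->data_length - cdb_size;
            x->data_length = cdb_size; /* shrink to the SCSI size */
        }
    }
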
/* reject any command that we don't have a handler for */
(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
goto out_unsupported_cdb;
- /* Let's limit control cdbs to a page, for simplicity's sake. */
- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
- size > PAGE_SIZE)
- goto out_invalid_cdb_field;
-
transport_set_supported_SAM_opcode(cmd);
return ret;
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto out;
+ goto out;
}
switch (cmd->data_direction) {
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
-void *transport_kmap_first_data_page(struct se_cmd *cmd)
+void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
+ struct page **pages;
+ int i;
BUG_ON(!sg);
/*
* tcm_loop who may be using a contig buffer from the SCSI midlayer for
* control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
*/
- return kmap(sg_page(sg)) + sg->offset;
+ if (!cmd->t_data_nents)
+ return NULL;
+ else if (cmd->t_data_nents == 1)
+ return kmap(sg_page(sg)) + sg->offset;
+
+ /* >1 page. use vmap */
+ pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ /* convert sg[] to pages[] */
+ for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+ pages[i] = sg_page(sg);
+ }
+
+ cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!cmd->t_data_vmap)
+ return NULL;
+
+ return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
-EXPORT_SYMBOL(transport_kmap_first_data_page);
+EXPORT_SYMBOL(transport_kmap_data_sg);
-void transport_kunmap_first_data_page(struct se_cmd *cmd)
+void transport_kunmap_data_sg(struct se_cmd *cmd)
{
- kunmap(sg_page(cmd->t_data_sg));
+ if (!cmd->t_data_nents) {
+ return;
+ } else if (cmd->t_data_nents == 1) {
+ kunmap(sg_page(cmd->t_data_sg));
+ return;
+ }
+
+ vunmap(cmd->t_data_vmap);
+ cmd->t_data_vmap = NULL;
}
-EXPORT_SYMBOL(transport_kunmap_first_data_page);
+EXPORT_SYMBOL(transport_kunmap_data_sg);
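
A hedged usage sketch of the renamed helpers from a CDB emulation path: map the command's whole data SGL (kmap for one entry, vmap for several), fill it, unmap. Kernel context; error handling and the surrounding se_cmd setup are elided, and fill_example_response is a hypothetical caller:

    static int fill_example_response(struct se_cmd *cmd)
    {
        unsigned char *buf;

        buf = transport_kmap_data_sg(cmd); /* kmap or vmap, per nents */
        if (!buf)
            return -ENOMEM;

        buf[0] = 0x7f;                     /* example payload byte */
        transport_kunmap_data_sg(cmd);     /* kunmap or vunmap + reset */
        return 0;
    }
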
static int
transport_generic_get_mem(struct se_cmd *cmd)
return 0;
out:
- while (i >= 0) {
- __free_page(sg_page(&cmd->t_data_sg[i]));
+ while (i > 0) {
i--;
+ __free_page(sg_page(&cmd->t_data_sg[i]));
}
kfree(cmd->t_data_sg);
cmd->t_data_sg = NULL;
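
The reordered loop is the usual decrement-before-release unwind: with i equal to the number of pages successfully allocated, testing i > 0 and decrementing before the free touches only pages that exist, where the old i >= 0 form freed one entry too many. In generic form (release and page are placeholders):

    /* i counts successful allocations; unwind from the last one back. */
    while (i > 0) {
        i--;
        release(page[i]); /* hypothetical release of the i-th allocation */
    }
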
struct se_task *task;
unsigned long flags;
+ /* Workaround for handling zero-length control CDBs */
+ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+ !cmd->data_length)
+ return 0;
+
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
return -ENOMEM;
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+
+ if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
+ u8 ua_asc = 0, ua_ascq = 0;
+
+ core_scsi3_ua_clear_for_request_sense(cmd,
+ &ua_asc, &ua_ascq);
+ }
+
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
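
Worth noting why a data CDB can legitimately yield zero tasks: unlike the 6-byte case handled earlier, a TRANSFER LENGTH of zero in a 10/12/16-byte CDB really does mean zero blocks, so the hunk above completes the command immediately (clearing any unit attention first for REQUEST SENSE) rather than failing it. A decoder illustrating the difference:

    #include <stdint.h>

    /* 10-byte R/W CDBs: TRANSFER LENGTH is bytes 7-8, and zero means
     * zero blocks (not 256 as in the 6-byte variant), so a READ_10 or
     * WRITE_10 can arrive with nothing to transfer. */
    static uint32_t sectors_from_cdb10(const uint8_t *cdb)
    {
        return ((uint32_t)cdb[7] << 8) | cdb[8]; /* may be zero */
    }
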
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT NOT SUPPORTED */
case TCM_SECTOR_COUNT_TOO_MANY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID COMMAND OPERATION CODE */
case TCM_UNKNOWN_MODE_PAGE:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
case TCM_CHECK_CONDITION_ABORT_CMD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* BUS DEVICE RESET FUNCTION OCCURRED */
case TCM_INCORRECT_AMOUNT_OF_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
case TCM_INVALID_CDB_FIELD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
- /* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
break;
case TCM_INVALID_PARAMETER_LIST:
/* CURRENT ERROR */
buffer[offset] = 0x70;
- /* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN PARAMETER LIST */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
break;
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
case TCM_SERVICE_CRC_ERROR:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* PROTOCOL SERVICE CRC ERROR */
case TCM_SNACK_REJECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* READ ERROR */
case TCM_WRITE_PROTECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* DATA PROTECT */
buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
/* WRITE PROTECTED */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
break;
+ case TCM_ADDRESS_OUT_OF_RANGE:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+ break;
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* UNIT ATTENTION */
buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
case TCM_CHECK_CONDITION_NOT_READY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* Not Ready */
buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
transport_get_sense_codes(cmd, &asc, &ascq);
default:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
break;
}
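
Every branch above now also fills the ADDITIONAL SENSE LENGTH byte. For reference, the fixed-format sense layout these cases assemble (offsets per SPC fixed-format sense, matching the kernel's SPC_SENSE_KEY_OFFSET = 2 and SPC_ASC_KEY_OFFSET = 12; build_fixed_sense is an illustrative stand-in, not the kernel function):

    #include <stdint.h>
    #include <string.h>

    /* Fixed-format sense data (SPC): byte 0 = 0x70 (current error),
     * byte 2 = sense key, byte 7 = additional sense length (10 gives
     * 18 valid bytes), byte 12 = ASC, byte 13 = ASCQ. */
    static void build_fixed_sense(uint8_t *buf, uint8_t key,
                                  uint8_t asc, uint8_t ascq)
    {
        memset(buf, 0, 18);
        buf[0]  = 0x70;
        buf[2]  = key;
        buf[7]  = 10;
        buf[12] = asc;
        buf[13] = ascq;
    }

For example, the new TCM_ADDRESS_OUT_OF_RANGE case corresponds to (ILLEGAL_REQUEST, 0x21, 0x00).
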
/*