Merge branch 'master' into for-next
[pandora-kernel.git] / drivers / target / target_core_transport.c
index 39ac190..b9d3501 100644 (file)
@@ -227,8 +227,6 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
 static void transport_stop_all_task_timers(struct se_cmd *cmd);
 
-int transport_emulate_control_cdb(struct se_task *task);
-
 int init_se_global(void)
 {
        struct se_global *global;
@@ -721,7 +719,7 @@ static int transport_cmd_check_stop(
                        cmd->se_lun = NULL;
                        /*
                         * Some fabric modules like tcm_loop can release
-                        * their internally allocated I/O refrence now and
+                        * their internally allocated I/O reference now and
                         * struct se_cmd now.
                         */
                        if (CMD_TFO(cmd)->check_stop_free != NULL) {
@@ -1622,7 +1620,7 @@ struct se_device *transport_add_device_to_core_hba(
        const char *inquiry_prod,
        const char *inquiry_rev)
 {
-       int ret = 0, force_pt;
+       int force_pt;
        struct se_device  *dev;
 
        dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
@@ -1739,9 +1737,8 @@ struct se_device *transport_add_device_to_core_hba(
        }
        scsi_dump_inquiry(dev);
 
+       return dev;
 out:
-       if (!ret)
-               return dev;
        kthread_stop(dev->process_thread);
 
        spin_lock(&hba->device_lock);
@@ -2032,7 +2029,7 @@ int transport_generic_handle_data(
         * If the received CDB has already been ABORTED by the generic
         * target engine, we now call transport_check_aborted_status()
         * to queue any delayed TASK_ABORTED status for the received CDB to the
-        * fabric module as we are expecting no futher incoming DATA OUT
+        * fabric module as we are expecting no further incoming DATA OUT
         * sequences at this point.
         */
        if (transport_check_aborted_status(cmd, 1) != 0)
@@ -2504,7 +2501,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
        if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
                return 1;
        /*
-        * Check for the existance of HEAD_OF_QUEUE, and if true return 1
+        * Check for the existence of HEAD_OF_QUEUE, and if true return 1
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
         if (cmd->sam_task_attr == TASK_ATTR_HOQ) {
@@ -2550,7 +2547,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
        if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
                /*
                 * Otherwise, add cmd w/ tasks to delayed cmd queue that
-                * will be drained upon competion of HEAD_OF_QUEUE task.
+                * will be drained upon completion of HEAD_OF_QUEUE task.
                 */
                spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
                cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
@@ -2592,7 +2589,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
        }
        /*
         * Call transport_cmd_check_stop() to see if a fabric exception
-        * has occured that prevents execution.
+        * has occurred that prevents execution.
         */
        if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
                /*
@@ -3112,7 +3109,7 @@ static int transport_generic_cmd_sequencer(
        if (ret != 0) {
                cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
                /*
-                * Set SCSI additional sense code (ASC) to 'LUN Not Accessable';
+                * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
                 * The ALUA additional sense code qualifier (ASCQ) is determined
                 * by the ALUA primary or secondary access state..
                 */
@@ -3870,7 +3867,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
                }
        }
        /*
-        * Check for a callback, used by amoungst other things
+        * Check for a callback, used by amongst other things
         * XDWRITE_READ_10 emulation.
         */
        if (cmd->transport_complete_callback)
@@ -4359,11 +4356,9 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
                        printk(KERN_ERR "Unable to allocate struct se_mem\n");
                        goto out;
                }
-               INIT_LIST_HEAD(&se_mem->se_list);
-               se_mem->se_len = (length > dma_size) ? dma_size : length;
 
 /* #warning FIXME Allocate contiguous pages for struct se_mem elements */
-               se_mem->se_page = (struct page *) alloc_pages(GFP_KERNEL, 0);
+               se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
                if (!(se_mem->se_page)) {
                        printk(KERN_ERR "alloc_pages() failed\n");
                        goto out;
@@ -4374,6 +4369,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
                        printk(KERN_ERR "kmap_atomic() failed\n");
                        goto out;
                }
+               INIT_LIST_HEAD(&se_mem->se_list);
+               se_mem->se_len = (length > dma_size) ? dma_size : length;
                memset(buf, 0, se_mem->se_len);
                kunmap_atomic(buf, KM_IRQ0);
 
@@ -4392,10 +4389,13 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 
        return 0;
 out:
+       if (se_mem)
+               __free_pages(se_mem->se_page, 0);
+       kmem_cache_free(se_mem_cache, se_mem);
        return -1;
 }
 
-extern u32 transport_calc_sg_num(
+u32 transport_calc_sg_num(
        struct se_task *task,
        struct se_mem *in_se_mem,
        u32 task_offset)
@@ -5834,31 +5834,26 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
        int ret;
 
        switch (tmr->function) {
-       case ABORT_TASK:
+       case TMR_ABORT_TASK:
                ref_cmd = tmr->ref_cmd;
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
-       case ABORT_TASK_SET:
-       case CLEAR_ACA:
-       case CLEAR_TASK_SET:
+       case TMR_ABORT_TASK_SET:
+       case TMR_CLEAR_ACA:
+       case TMR_CLEAR_TASK_SET:
                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                break;
-       case LUN_RESET:
+       case TMR_LUN_RESET:
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                break;
-#if 0
-       case TARGET_WARM_RESET:
-               transport_generic_host_reset(dev->se_hba);
+       case TMR_TARGET_WARM_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
-       case TARGET_COLD_RESET:
-               transport_generic_host_reset(dev->se_hba);
-               transport_generic_cold_reset(dev->se_hba);
+       case TMR_TARGET_COLD_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
-#endif
        default:
                printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
                                tmr->function);