diff: drivers/target/target_core_device.c (blobs ba5edec..6993961) — original page title lost in extraction; the "PCI: Restore detection of read-only BARs" heading belonged to an unrelated commit page
[pandora-kernel.git] / drivers / target / target_core_device.c
index ba5edec..6993961 100644 (file)
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -649,6 +646,7 @@ void core_dev_unexport(
        spin_unlock(&dev->se_port_lock);
 
        se_dev_stop(dev);
+       lun->lun_sep = NULL;
        lun->lun_se_dev = NULL;
 }
 
@@ -661,7 +659,9 @@ int target_report_luns(struct se_task *se_task)
        unsigned char *buf;
        u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-       buf = transport_kmap_first_data_page(se_cmd);
+       buf = transport_kmap_data_sg(se_cmd);
+       if (!buf)
+               return -ENOMEM;
 
        /*
         * If no struct se_session pointer is present, this struct se_cmd is
@@ -699,16 +699,16 @@ int target_report_luns(struct se_task *se_task)
         * See SPC3 r07, page 159.
         */
 done:
-       transport_kunmap_first_data_page(se_cmd);
        lun_count *= 8;
        buf[0] = ((lun_count >> 24) & 0xff);
        buf[1] = ((lun_count >> 16) & 0xff);
        buf[2] = ((lun_count >> 8) & 0xff);
        buf[3] = (lun_count & 0xff);
+       transport_kunmap_data_sg(se_cmd);
 
        se_task->task_scsi_status = GOOD;
        transport_complete_task(se_task, 1);
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     se_release_device_for_hba():
@@ -836,20 +836,20 @@ int se_dev_check_shutdown(struct se_device *dev)
 
 u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 {
-       u32 tmp, aligned_max_sectors;
+       u32 aligned_max_sectors;
+       u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
-       tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
-       aligned_max_sectors = (tmp / block_size);
-       if (max_sectors != aligned_max_sectors) {
-               printk(KERN_INFO "Rounding down aligned max_sectors from %u"
-                               " to %u\n", max_sectors, aligned_max_sectors);
-               return aligned_max_sectors;
-       }
+       alignment = max(1ul, PAGE_SIZE / block_size);
+       aligned_max_sectors = rounddown(max_sectors, alignment);
 
-       return max_sectors;
+       if (max_sectors != aligned_max_sectors)
+               pr_info("Rounding down aligned max_sectors from %u to %u\n",
+                       max_sectors, aligned_max_sectors);
+
+       return aligned_max_sectors;
 }
 
 void se_dev_set_default_attribs(
@@ -957,8 +957,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("dpo_emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("dpo_emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +972,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (dev->transport->fua_write_emulated == 0) {
+       if (flag && dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
@@ -985,8 +989,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("ua read emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("ua read emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1003,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == 0) {
+       if (flag && dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
@@ -1056,7 +1064,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1077,7 +1085,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1432,24 +1440,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
 
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
+       struct se_node_acl *nacl,
        u32 mapped_lun,
-       char *initiatorname,
        int *ret)
 {
        struct se_lun_acl *lacl;
-       struct se_node_acl *nacl;
 
-       if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+       if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->get_fabric_name());
                *ret = -EOVERFLOW;
                return NULL;
        }
-       nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
-       if (!nacl) {
-               *ret = -EINVAL;
-               return NULL;
-       }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
@@ -1460,7 +1462,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        INIT_LIST_HEAD(&lacl->lacl_list);
        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;
-       snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+       snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
+                nacl->initiatorname);
 
        return lacl;
 }
@@ -1587,7 +1590,6 @@ int core_dev_setup_virtual_lun0(void)
                ret = -ENOMEM;
                goto out;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);