+/*
+ * Check the ALUA access state for the LBA range addressed by @cmd
+ * against the device's LBA map (SPC-4 referrals). Returns 1 and sets
+ * @alua_ascq when the range is not accessible through @tg_pt_gp,
+ * 0 otherwise.
+ */
+static inline int core_alua_state_lba_dependent(
+	struct se_cmd *cmd,
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u8 *alua_ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	u64 segment_size, segment_mult, sectors, lba;
+
+	/* Only need to check for cdb actually containing LBAs */
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		+		return 0;
+
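+	/* The map walk below runs entirely under the lba_map_lock */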
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	segment_size = dev->t10_alua.lba_map_segment_size;
+	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
+
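+	/* Walk the LBA range of the command, one map segment at a time */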
+	lba = cmd->t_task_lba;
+	while (lba < cmd->t_task_lba + sectors) {
+		struct t10_alua_lba_map *cur_map = NULL, *map;
+		struct t10_alua_lba_map_member *map_mem;
+
+		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+				    lba_map_list) {
+			u64 start_lba, last_lba;
+			u64 first_lba = map->lba_map_first_lba;
+
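+			/*
+			 * A segment multiplier means the map describes a
+			 * single period of a repeating pattern; compare
+			 * the LBA's offset within that period rather than
+			 * the absolute LBA.
+			 */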
+			if (segment_mult) {
+				u64 tmp = lba;
+				start_lba = sector_div(tmp, segment_size * segment_mult);
+
+				last_lba = first_lba + segment_size - 1;
+				if (start_lba >= first_lba &&
+				    start_lba <= last_lba) {
+					lba += segment_size;
+					cur_map = map;
+					break;
+				}
+			} else {
+				last_lba = map->lba_map_last_lba;
+				if (lba >= first_lba && lba <= last_lba) {
+					lba = last_lba + 1;
+					cur_map = map;
+					break;
+				}
+			}
+		}
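+		/* No map entry covers this LBA at all */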
+		if (!cur_map) {
+			spin_unlock(&dev->t10_alua.lba_map_lock);
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
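+		/* Check this port group's state for the matching segment */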
+		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			if (map_mem->lba_map_mem_alua_pg_id !=
+			    tg_pt_gp->tg_pt_gp_id)
+				continue;
+			switch (map_mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_STANDBY:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				return 1;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				return 1;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return 0;
+}
+