2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
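 *
 * For example (illustrative values; the option names are the module
 * parameters defined below):
 *	modprobe scsi_debug num_tgts=2 max_luns=4 dev_size_mb=16
 * or, with the driver built in, on the kernel command line:
 *	scsi_debug.num_tgts=2 scsi_debug.max_luns=4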
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
94 #define DEF_DEV_SIZE_MB 8
98 #define DEF_EVERY_NTH 0
103 #define DEF_LBPWS10 0
104 #define DEF_LOWEST_ALIGNED 0
105 #define DEF_NO_LUN_0 0
106 #define DEF_NUM_PARTS 0
108 #define DEF_OPT_BLKS 64
109 #define DEF_PHYSBLK_EXP 0
111 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
112 #define DEF_SECTOR_SIZE 512
113 #define DEF_UNMAP_ALIGNMENT 0
114 #define DEF_UNMAP_GRANULARITY 1
115 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
116 #define DEF_UNMAP_MAX_DESC 256
117 #define DEF_VIRTUAL_GB 0
118 #define DEF_VPD_USE_HOSTNO 1
119 #define DEF_WRITESAME_LENGTH 0xFFFF
121 /* bit mask values for scsi_debug_opts */
122 #define SCSI_DEBUG_OPT_NOISE 1
123 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
124 #define SCSI_DEBUG_OPT_TIMEOUT 4
125 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
126 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
127 #define SCSI_DEBUG_OPT_DIF_ERR 32
128 #define SCSI_DEBUG_OPT_DIX_ERR 64
129 /* When "every_nth" > 0 then modulo "every_nth" commands:
130 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
131 * - a RECOVERED_ERROR is simulated on successful read and write
132 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
133 * - a TRANSPORT_ERROR is simulated on successful read and write
134 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
136 * When "every_nth" < 0 then after "- every_nth" commands:
137 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
138 * - a RECOVERED_ERROR is simulated on successful read and write
139 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
140 * - a TRANSPORT_ERROR is simulated on successful read and write
141 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
142 * This will continue until some other action occurs (e.g. the user
143 * writing a new value (other than -1 or 1) to every_nth via sysfs).
146 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a
147  * medium error is simulated at this sector on read commands: */
148 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
149 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
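
/* Illustrative error-injection sketch (parameter values are examples only):
 *	modprobe scsi_debug every_nth=100 opts=8
 * reports a RECOVERED_ERROR on every 100th successful read or write
 * (8 == SCSI_DEBUG_OPT_RECOVERED_ERR), while opts=5
 * (SCSI_DEBUG_OPT_NOISE | SCSI_DEBUG_OPT_TIMEOUT) would instead trace
 * commands and suppress the response to every 100th one.
 */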
151 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
152 * or "peripheral device" addressing (value 0) */
153 #define SAM2_LUN_ADDRESS_METHOD 0
154 #define SAM2_WLUN_REPORT_LUNS 0xc101
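/* 0xc101 encodes the REPORT LUNS well known logical unit: the upper byte
 * (0xc1) selects well known LUN addressing per SAM-2 and the lower byte
 * (0x01) identifies the REPORT LUNS W-LUN. */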
156 /* Can queue up to this number of commands. Typically commands that
157  * have a non-zero delay are queued. */
158 #define SCSI_DEBUG_CANQUEUE 255
160 static int scsi_debug_add_host = DEF_NUM_HOST;
161 static int scsi_debug_ato = DEF_ATO;
162 static int scsi_debug_delay = DEF_DELAY;
163 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
164 static int scsi_debug_dif = DEF_DIF;
165 static int scsi_debug_dix = DEF_DIX;
166 static int scsi_debug_dsense = DEF_D_SENSE;
167 static int scsi_debug_every_nth = DEF_EVERY_NTH;
168 static int scsi_debug_fake_rw = DEF_FAKE_RW;
169 static int scsi_debug_guard = DEF_GUARD;
170 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
171 static int scsi_debug_max_luns = DEF_MAX_LUNS;
172 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
173 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
174 static int scsi_debug_no_uld = 0;
175 static int scsi_debug_num_parts = DEF_NUM_PARTS;
176 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
177 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
178 static int scsi_debug_opts = DEF_OPTS;
179 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
180 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
181 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
182 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
183 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
184 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
185 static unsigned int scsi_debug_lbpu = DEF_LBPU;
186 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
187 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
188 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
189 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
190 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
191 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
192 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
194 static int scsi_debug_cmnd_count = 0;
196 #define DEV_READONLY(TGT) (0)
197 #define DEV_REMOVEABLE(TGT) (0)
199 static unsigned int sdebug_store_sectors;
200 static sector_t sdebug_capacity; /* in sectors */
202 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
203 may still need them */
204 static int sdebug_heads; /* heads per disk */
205 static int sdebug_cylinders_per; /* cylinders per surface */
206 static int sdebug_sectors_per; /* sectors per cylinder */
208 #define SDEBUG_MAX_PARTS 4
210 #define SDEBUG_SENSE_LEN 32
212 #define SCSI_DEBUG_MAX_CMD_LEN 32
214 static unsigned int scsi_debug_lbp(void)
216 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
219 struct sdebug_dev_info {
220 struct list_head dev_list;
221 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
222 unsigned int channel;
225 struct sdebug_host_info *sdbg_host;
232 struct sdebug_host_info {
233 struct list_head host_list;
234 struct Scsi_Host *shost;
236 struct list_head dev_info_list;
239 #define to_sdebug_host(d) \
240 container_of(d, struct sdebug_host_info, dev)
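
/*
 * Minimal usage sketch (hypothetical caller, not part of the code shown):
 * given the struct device embedded in sdebug_host_info, recover the
 * containing host structure via the macro above.
 *
 *	static void example_use(struct device *dev)
 *	{
 *		struct sdebug_host_info *sdbg_host = to_sdebug_host(dev);
 *
 *		dev_info(dev, "host %p\n", sdbg_host->shost);
 *	}
 */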
242 static LIST_HEAD(sdebug_host_list);
243 static DEFINE_SPINLOCK(sdebug_host_list_lock);
245 typedef void (* done_funct_t) (struct scsi_cmnd *);
247 struct sdebug_queued_cmd {
249 struct timer_list cmnd_timer;
250 done_funct_t done_funct;
251 struct scsi_cmnd * a_cmnd;
254 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
256 static unsigned char * fake_storep; /* ramdisk storage */
257 static unsigned char *dif_storep; /* protection info */
258 static void *map_storep; /* provisioning map */
260 static unsigned long map_size;
261 static int num_aborts = 0;
262 static int num_dev_resets = 0;
263 static int num_bus_resets = 0;
264 static int num_host_resets = 0;
265 static int dix_writes;
266 static int dix_reads;
267 static int dif_errors;
269 static DEFINE_SPINLOCK(queued_arr_lock);
270 static DEFINE_RWLOCK(atomic_rw);
272 static char sdebug_proc_name[] = "scsi_debug";
274 static struct bus_type pseudo_lld_bus;
276 static inline sector_t dif_offset(sector_t sector)
281 static struct device_driver sdebug_driverfs_driver = {
282 .name = sdebug_proc_name,
283 .bus = &pseudo_lld_bus,
286 static const int check_condition_result =
287 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
289 static const int illegal_condition_result =
290 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
292 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
294 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
297 static int sdebug_add_adapter(void);
298 static void sdebug_remove_adapter(void);
300 static void sdebug_max_tgts_luns(void)
302 struct sdebug_host_info *sdbg_host;
303 struct Scsi_Host *hpnt;
305 spin_lock(&sdebug_host_list_lock);
306 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
307 hpnt = sdbg_host->shost;
308 if ((hpnt->this_id >= 0) &&
309 (scsi_debug_num_tgts > hpnt->this_id))
310 hpnt->max_id = scsi_debug_num_tgts + 1;
312 hpnt->max_id = scsi_debug_num_tgts;
313 /* scsi_debug_max_luns; */
314 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
316 spin_unlock(&sdebug_host_list_lock);
319 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
322 unsigned char *sbuff;
324 sbuff = devip->sense_buff;
325 memset(sbuff, 0, SDEBUG_SENSE_LEN);
327 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
329 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
330 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
331 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
334 static void get_data_transfer_info(unsigned char *cmd,
335 unsigned long long *lba, unsigned int *num,
341 case VARIABLE_LENGTH_CMD:
342 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
343 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
344 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
345 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
347 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
348 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
350 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
357 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
358 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
359 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
360 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
362 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
367 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
370 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
377 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
380 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
384 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
385 (u32)(cmd[1] & 0x1f) << 16;
386 *num = (0 == cmd[4]) ? 256 : cmd[4];
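	/*
	 * Sketch of the equivalent decode using the <asm/unaligned.h> helpers
	 * already used elsewhere in this file; for the 16-byte CDB case above
	 * this would be:
	 *
	 *	*lba = get_unaligned_be64(&cmd[2]);
	 *	*num = get_unaligned_be32(&cmd[10]);
	 *
	 * The 6-byte case cannot use them directly: its LBA is only 21 bits
	 * (cmd[1] bits 4..0 plus cmd[2..3]) and a transfer length of 0 means
	 * 256 blocks, as handled above.
	 */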
393 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
395 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
396 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
399 /* return -ENOTTY; // correct return but upsets fdisk */
402 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
403 struct sdebug_dev_info * devip)
406 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
407 printk(KERN_INFO "scsi_debug: Reporting Unit "
408 "attention: power on reset\n");
410 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
411 return check_condition_result;
413 if ((0 == reset_only) && devip->stopped) {
414 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
415 printk(KERN_INFO "scsi_debug: Reporting Not "
416 "ready: initializing command required\n");
417 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
419 return check_condition_result;
424 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
425 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
429 struct scsi_data_buffer *sdb = scsi_in(scp);
433 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
434 return (DID_ERROR << 16);
436 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
439 sdb->resid -= act_len;
441 sdb->resid = scsi_bufflen(scp) - act_len;
446 /* Returns number of bytes fetched into 'arr' or -1 if error. */
447 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
450 if (!scsi_bufflen(scp))
452 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
455 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
459 static const char * inq_vendor_id = "Linux ";
460 static const char * inq_product_id = "scsi_debug ";
461 static const char * inq_product_rev = "0004";
463 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
464 int target_dev_id, int dev_id_num,
465 const char * dev_id_str,
471 port_a = target_dev_id + 1;
472 /* T10 vendor identifier field format (faked) */
473 arr[0] = 0x2; /* ASCII */
476 memcpy(&arr[4], inq_vendor_id, 8);
477 memcpy(&arr[12], inq_product_id, 16);
478 memcpy(&arr[28], dev_id_str, dev_id_str_len);
479 num = 8 + 16 + dev_id_str_len;
482 if (dev_id_num >= 0) {
483 /* NAA-5, Logical unit identifier (binary) */
484 arr[num++] = 0x1; /* binary (not necessarily sas) */
485 arr[num++] = 0x3; /* PIV=0, lu, naa */
488 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
492 arr[num++] = (dev_id_num >> 24);
493 arr[num++] = (dev_id_num >> 16) & 0xff;
494 arr[num++] = (dev_id_num >> 8) & 0xff;
495 arr[num++] = dev_id_num & 0xff;
496 /* Target relative port number */
497 arr[num++] = 0x61; /* proto=sas, binary */
498 arr[num++] = 0x94; /* PIV=1, target port, rel port */
499 arr[num++] = 0x0; /* reserved */
500 arr[num++] = 0x4; /* length */
501 arr[num++] = 0x0; /* reserved */
502 arr[num++] = 0x0; /* reserved */
504 arr[num++] = 0x1; /* relative port A */
506 /* NAA-5, Target port identifier */
507 arr[num++] = 0x61; /* proto=sas, binary */
508 arr[num++] = 0x93; /* piv=1, target port, naa */
511 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
515 arr[num++] = (port_a >> 24);
516 arr[num++] = (port_a >> 16) & 0xff;
517 arr[num++] = (port_a >> 8) & 0xff;
518 arr[num++] = port_a & 0xff;
519 /* NAA-5, Target port group identifier */
520 arr[num++] = 0x61; /* proto=sas, binary */
521 arr[num++] = 0x95; /* piv=1, target port group id */
526 arr[num++] = (port_group_id >> 8) & 0xff;
527 arr[num++] = port_group_id & 0xff;
528 /* NAA-5, Target device identifier */
529 arr[num++] = 0x61; /* proto=sas, binary */
530 arr[num++] = 0xa3; /* piv=1, target device, naa */
533 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
537 arr[num++] = (target_dev_id >> 24);
538 arr[num++] = (target_dev_id >> 16) & 0xff;
539 arr[num++] = (target_dev_id >> 8) & 0xff;
540 arr[num++] = target_dev_id & 0xff;
541 /* SCSI name string: Target device identifier */
542 arr[num++] = 0x63; /* proto=sas, UTF-8 */
543 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
546 memcpy(arr + num, "naa.52222220", 12);
548 snprintf(b, sizeof(b), "%08X", target_dev_id);
549 memcpy(arr + num, b, 8);
551 memset(arr + num, 0, 4);
557 static unsigned char vpd84_data[] = {
558 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
559 0x22,0x22,0x22,0x0,0xbb,0x1,
560 0x22,0x22,0x22,0x0,0xbb,0x2,
563 static int inquiry_evpd_84(unsigned char * arr)
565 memcpy(arr, vpd84_data, sizeof(vpd84_data));
566 return sizeof(vpd84_data);
569 static int inquiry_evpd_85(unsigned char * arr)
572 const char * na1 = "https://www.kernel.org/config";
573 const char * na2 = "http://www.kernel.org/log";
576 arr[num++] = 0x1; /* lu, storage config */
577 arr[num++] = 0x0; /* reserved */
582 plen = ((plen / 4) + 1) * 4;
583 arr[num++] = plen; /* length, null terminated, padded */
584 memcpy(arr + num, na1, olen);
585 memset(arr + num + olen, 0, plen - olen);
588 arr[num++] = 0x4; /* lu, logging */
589 arr[num++] = 0x0; /* reserved */
594 plen = ((plen / 4) + 1) * 4;
595 arr[num++] = plen; /* length, null terminated, padded */
596 memcpy(arr + num, na2, olen);
597 memset(arr + num + olen, 0, plen - olen);
603 /* SCSI ports VPD page */
604 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
609 port_a = target_dev_id + 1;
611 arr[num++] = 0x0; /* reserved */
612 arr[num++] = 0x0; /* reserved */
614 arr[num++] = 0x1; /* relative port 1 (primary) */
615 memset(arr + num, 0, 6);
618 arr[num++] = 12; /* length tp descriptor */
619 /* naa-5 target port identifier (A) */
620 arr[num++] = 0x61; /* proto=sas, binary */
621 arr[num++] = 0x93; /* PIV=1, target port, NAA */
622 arr[num++] = 0x0; /* reserved */
623 arr[num++] = 0x8; /* length */
624 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
628 arr[num++] = (port_a >> 24);
629 arr[num++] = (port_a >> 16) & 0xff;
630 arr[num++] = (port_a >> 8) & 0xff;
631 arr[num++] = port_a & 0xff;
633 arr[num++] = 0x0; /* reserved */
634 arr[num++] = 0x0; /* reserved */
636 arr[num++] = 0x2; /* relative port 2 (secondary) */
637 memset(arr + num, 0, 6);
640 arr[num++] = 12; /* length tp descriptor */
641 /* naa-5 target port identifier (B) */
642 arr[num++] = 0x61; /* proto=sas, binary */
643 arr[num++] = 0x93; /* PIV=1, target port, NAA */
644 arr[num++] = 0x0; /* reserved */
645 arr[num++] = 0x8; /* length */
646 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
650 arr[num++] = (port_b >> 24);
651 arr[num++] = (port_b >> 16) & 0xff;
652 arr[num++] = (port_b >> 8) & 0xff;
653 arr[num++] = port_b & 0xff;
659 static unsigned char vpd89_data[] = {
660 /* from 4th byte */ 0,0,0,0,
661 'l','i','n','u','x',' ',' ',' ',
662 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
664 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
666 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
667 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
668 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
669 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
671 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
673 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
675 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
676 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
677 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
679 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
680 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
681 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
686 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
687 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
688 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
689 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
698 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
703 static int inquiry_evpd_89(unsigned char * arr)
705 memcpy(arr, vpd89_data, sizeof(vpd89_data));
706 return sizeof(vpd89_data);
710 /* Block limits VPD page (SBC-3) */
711 static unsigned char vpdb0_data[] = {
712 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
713 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
714 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
715 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
718 static int inquiry_evpd_b0(unsigned char * arr)
722 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
724 /* Optimal transfer length granularity */
725 gran = 1 << scsi_debug_physblk_exp;
726 arr[2] = (gran >> 8) & 0xff;
727 arr[3] = gran & 0xff;
729 /* Maximum Transfer Length */
730 if (sdebug_store_sectors > 0x400) {
731 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
732 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
733 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
734 arr[7] = sdebug_store_sectors & 0xff;
737 /* Optimal Transfer Length */
738 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
740 if (scsi_debug_lbpu) {
741 /* Maximum Unmap LBA Count */
742 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
744 /* Maximum Unmap Block Descriptor Count */
745 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
748 /* Unmap Granularity Alignment */
749 if (scsi_debug_unmap_alignment) {
750 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
751 arr[28] |= 0x80; /* UGAVALID */
754 /* Optimal Unmap Granularity */
755 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
757 /* Maximum WRITE SAME Length */
758 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
760 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
762 return sizeof(vpdb0_data);
765 /* Block device characteristics VPD page (SBC-3) */
766 static int inquiry_evpd_b1(unsigned char *arr)
768 memset(arr, 0, 0x3c);
770 arr[1] = 1; /* non rotating medium (e.g. solid state) */
772 arr[3] = 5; /* less than 1.8" */
777 /* Thin provisioning VPD page (SBC-3) */
778 static int inquiry_evpd_b2(unsigned char *arr)
781 arr[0] = 0; /* threshold exponent */
786 if (scsi_debug_lbpws)
789 if (scsi_debug_lbpws10)
795 #define SDEBUG_LONG_INQ_SZ 96
796 #define SDEBUG_MAX_INQ_ARR_SZ 584
798 static int resp_inquiry(struct scsi_cmnd * scp, int target,
799 struct sdebug_dev_info * devip)
801 unsigned char pq_pdt;
803 unsigned char *cmd = (unsigned char *)scp->cmnd;
804 int alloc_len, n, ret;
806 alloc_len = (cmd[3] << 8) + cmd[4];
807 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
809 return DID_REQUEUE << 16;
811 pq_pdt = 0x1e; /* present, wlun */
812 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
813 pq_pdt = 0x7f; /* not present, no device type */
815 pq_pdt = (scsi_debug_ptype & 0x1f);
817 if (0x2 & cmd[1]) { /* CMDDT bit set */
818 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
821 return check_condition_result;
822 } else if (0x1 & cmd[1]) { /* EVPD bit set */
823 int lu_id_num, port_group_id, target_dev_id, len;
825 int host_no = devip->sdbg_host->shost->host_no;
827 port_group_id = (((host_no + 1) & 0x7f) << 8) +
828 (devip->channel & 0x7f);
829 if (0 == scsi_debug_vpd_use_hostno)
831 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
832 (devip->target * 1000) + devip->lun);
833 target_dev_id = ((host_no + 1) * 2000) +
834 (devip->target * 1000) - 3;
835 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
836 if (0 == cmd[2]) { /* supported vital product data pages */
837 arr[1] = cmd[2]; /*sanity */
839 arr[n++] = 0x0; /* this page */
840 arr[n++] = 0x80; /* unit serial number */
841 arr[n++] = 0x83; /* device identification */
842 arr[n++] = 0x84; /* software interface ident. */
843 arr[n++] = 0x85; /* management network addresses */
844 arr[n++] = 0x86; /* extended inquiry */
845 arr[n++] = 0x87; /* mode page policy */
846 arr[n++] = 0x88; /* SCSI ports */
847 arr[n++] = 0x89; /* ATA information */
848 arr[n++] = 0xb0; /* Block limits (SBC) */
849 arr[n++] = 0xb1; /* Block characteristics (SBC) */
850 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
852 arr[3] = n - 4; /* number of supported VPD pages */
853 } else if (0x80 == cmd[2]) { /* unit serial number */
854 arr[1] = cmd[2]; /*sanity */
856 memcpy(&arr[4], lu_id_str, len);
857 } else if (0x83 == cmd[2]) { /* device identification */
858 arr[1] = cmd[2]; /*sanity */
859 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
860 target_dev_id, lu_id_num,
862 } else if (0x84 == cmd[2]) { /* Software interface ident. */
863 arr[1] = cmd[2]; /*sanity */
864 arr[3] = inquiry_evpd_84(&arr[4]);
865 } else if (0x85 == cmd[2]) { /* Management network addresses */
866 arr[1] = cmd[2]; /*sanity */
867 arr[3] = inquiry_evpd_85(&arr[4]);
868 } else if (0x86 == cmd[2]) { /* extended inquiry */
869 arr[1] = cmd[2]; /*sanity */
870 arr[3] = 0x3c; /* number of following entries */
871 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
872 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
873 else if (scsi_debug_dif)
874 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
876 arr[4] = 0x0; /* no protection stuff */
877 arr[5] = 0x7; /* head of q, ordered + simple q's */
878 } else if (0x87 == cmd[2]) { /* mode page policy */
879 arr[1] = cmd[2]; /*sanity */
880 arr[3] = 0x8; /* number of following entries */
881 arr[4] = 0x2; /* disconnect-reconnect mp */
882 arr[6] = 0x80; /* mlus, shared */
883 arr[8] = 0x18; /* protocol specific lu */
884 arr[10] = 0x82; /* mlus, per initiator port */
885 } else if (0x88 == cmd[2]) { /* SCSI Ports */
886 arr[1] = cmd[2]; /*sanity */
887 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
888 } else if (0x89 == cmd[2]) { /* ATA information */
889 arr[1] = cmd[2]; /*sanity */
890 n = inquiry_evpd_89(&arr[4]);
893 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
894 arr[1] = cmd[2]; /*sanity */
895 arr[3] = inquiry_evpd_b0(&arr[4]);
896 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
897 arr[1] = cmd[2]; /*sanity */
898 arr[3] = inquiry_evpd_b1(&arr[4]);
899 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
900 arr[1] = cmd[2]; /*sanity */
901 arr[3] = inquiry_evpd_b2(&arr[4]);
903 /* Illegal request, invalid field in cdb */
904 mk_sense_buffer(devip, ILLEGAL_REQUEST,
905 INVALID_FIELD_IN_CDB, 0);
907 return check_condition_result;
909 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
910 ret = fill_from_dev_buffer(scp, arr,
911 min(len, SDEBUG_MAX_INQ_ARR_SZ));
915 /* drops through here for a standard inquiry */
916 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
917 arr[2] = scsi_debug_scsi_level;
918 arr[3] = 2; /* response_data_format==2 */
919 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
920 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
921 if (0 == scsi_debug_vpd_use_hostno)
922 arr[5] = 0x10; /* claim: implicit TGPS */
923 arr[6] = 0x10; /* claim: MultiP */
924 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
925 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
926 memcpy(&arr[8], inq_vendor_id, 8);
927 memcpy(&arr[16], inq_product_id, 16);
928 memcpy(&arr[32], inq_product_rev, 4);
929 /* version descriptors (2 bytes each) follow */
930 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
931 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
933 if (scsi_debug_ptype == 0) {
934 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
935 } else if (scsi_debug_ptype == 1) {
936 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
938 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
939 ret = fill_from_dev_buffer(scp, arr,
940 min(alloc_len, SDEBUG_LONG_INQ_SZ));
945 static int resp_requests(struct scsi_cmnd * scp,
946 struct sdebug_dev_info * devip)
948 unsigned char * sbuff;
949 unsigned char *cmd = (unsigned char *)scp->cmnd;
950 unsigned char arr[SDEBUG_SENSE_LEN];
954 memset(arr, 0, sizeof(arr));
955 if (devip->reset == 1)
956 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
957 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
958 sbuff = devip->sense_buff;
959 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
962 arr[1] = 0x0; /* NO_SENSE in sense_key */
963 arr[2] = THRESHOLD_EXCEEDED;
964 arr[3] = 0xff; /* TEST set and MRIE==6 */
967 arr[2] = 0x0; /* NO_SENSE in sense_key */
968 arr[7] = 0xa; /* 18 byte sense buffer */
969 arr[12] = THRESHOLD_EXCEEDED;
970 arr[13] = 0xff; /* TEST set and MRIE==6 */
973 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
974 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
975 /* DESC bit set and sense_buff in fixed format */
976 memset(arr, 0, sizeof(arr));
978 arr[1] = sbuff[2]; /* sense key */
979 arr[2] = sbuff[12]; /* asc */
980 arr[3] = sbuff[13]; /* ascq */
984 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
985 return fill_from_dev_buffer(scp, arr, len);
988 static int resp_start_stop(struct scsi_cmnd * scp,
989 struct sdebug_dev_info * devip)
991 unsigned char *cmd = (unsigned char *)scp->cmnd;
992 int power_cond, errsts, start;
994 if ((errsts = check_readiness(scp, 1, devip)))
996 power_cond = (cmd[4] & 0xf0) >> 4;
998 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1000 return check_condition_result;
1003 if (start == devip->stopped)
1004 devip->stopped = !start;
1008 static sector_t get_sdebug_capacity(void)
1010 if (scsi_debug_virtual_gb > 0)
1011 return (sector_t)scsi_debug_virtual_gb *
1012 (1073741824 / scsi_debug_sector_size);
1014 return sdebug_store_sectors;
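
/*
 * Worked example (illustrative numbers): with scsi_debug_virtual_gb=4 and
 * the default 512-byte sector size, the reported capacity is
 * 4 * (1073741824 / 512) = 8388608 sectors (4 GiB), regardless of how much
 * RAM (sdebug_store_sectors) actually backs the store.
 */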
1017 #define SDEBUG_READCAP_ARR_SZ 8
1018 static int resp_readcap(struct scsi_cmnd * scp,
1019 struct sdebug_dev_info * devip)
1021 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1025 if ((errsts = check_readiness(scp, 1, devip)))
1027 /* following just in case virtual_gb changed */
1028 sdebug_capacity = get_sdebug_capacity();
1029 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1030 if (sdebug_capacity < 0xffffffff) {
1031 capac = (unsigned int)sdebug_capacity - 1;
1032 arr[0] = (capac >> 24);
1033 arr[1] = (capac >> 16) & 0xff;
1034 arr[2] = (capac >> 8) & 0xff;
1035 arr[3] = capac & 0xff;
1042 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1043 arr[7] = scsi_debug_sector_size & 0xff;
1044 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1047 #define SDEBUG_READCAP16_ARR_SZ 32
1048 static int resp_readcap16(struct scsi_cmnd * scp,
1049 struct sdebug_dev_info * devip)
1051 unsigned char *cmd = (unsigned char *)scp->cmnd;
1052 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1053 unsigned long long capac;
1054 int errsts, k, alloc_len;
1056 if ((errsts = check_readiness(scp, 1, devip)))
1058 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1060 /* following just in case virtual_gb changed */
1061 sdebug_capacity = get_sdebug_capacity();
1062 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1063 capac = sdebug_capacity - 1;
1064 for (k = 0; k < 8; ++k, capac >>= 8)
1065 arr[7 - k] = capac & 0xff;
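	/* The loop above is equivalent to put_unaligned_be64(capac, &arr[0]),
	 * the helper used elsewhere in this file: it stores the last LBA
	 * (capacity - 1) big-endian in bytes 0..7 of the response. */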
1066 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1067 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1068 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1069 arr[11] = scsi_debug_sector_size & 0xff;
1070 arr[13] = scsi_debug_physblk_exp & 0xf;
1071 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1073 if (scsi_debug_lbp())
1074 arr[14] |= 0x80; /* LBPME */
1076 arr[15] = scsi_debug_lowest_aligned & 0xff;
1078 if (scsi_debug_dif) {
1079 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1080 arr[12] |= 1; /* PROT_EN */
1083 return fill_from_dev_buffer(scp, arr,
1084 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1087 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1089 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1090 struct sdebug_dev_info * devip)
1092 unsigned char *cmd = (unsigned char *)scp->cmnd;
1093 unsigned char * arr;
1094 int host_no = devip->sdbg_host->shost->host_no;
1095 int n, ret, alen, rlen;
1096 int port_group_a, port_group_b, port_a, port_b;
1098 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1101 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1103 return DID_REQUEUE << 16;
1105 * EVPD page 0x88 states we have two ports, one
1106 * real and a fake port with no device connected.
1107 * So we create two port groups with one port each
1108 * and set the group with port B to unavailable.
1110 port_a = 0x1; /* relative port A */
1111 port_b = 0x2; /* relative port B */
1112 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1113 (devip->channel & 0x7f);
1114 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1115 (devip->channel & 0x7f) + 0x80;
1118 * The asymmetric access state is cycled according to the host_id.
1121 if (0 == scsi_debug_vpd_use_hostno) {
1122 arr[n++] = host_no % 3; /* Asymm access state */
1123 arr[n++] = 0x0F; /* claim: all states are supported */
1125 arr[n++] = 0x0; /* Active/Optimized path */
1126 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1128 arr[n++] = (port_group_a >> 8) & 0xff;
1129 arr[n++] = port_group_a & 0xff;
1130 arr[n++] = 0; /* Reserved */
1131 arr[n++] = 0; /* Status code */
1132 arr[n++] = 0; /* Vendor unique */
1133 arr[n++] = 0x1; /* One port per group */
1134 arr[n++] = 0; /* Reserved */
1135 arr[n++] = 0; /* Reserved */
1136 arr[n++] = (port_a >> 8) & 0xff;
1137 arr[n++] = port_a & 0xff;
1138 arr[n++] = 3; /* Port unavailable */
1139 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1140 arr[n++] = (port_group_b >> 8) & 0xff;
1141 arr[n++] = port_group_b & 0xff;
1142 arr[n++] = 0; /* Reserved */
1143 arr[n++] = 0; /* Status code */
1144 arr[n++] = 0; /* Vendor unique */
1145 arr[n++] = 0x1; /* One port per group */
1146 arr[n++] = 0; /* Reserved */
1147 arr[n++] = 0; /* Reserved */
1148 arr[n++] = (port_b >> 8) & 0xff;
1149 arr[n++] = port_b & 0xff;
1152 arr[0] = (rlen >> 24) & 0xff;
1153 arr[1] = (rlen >> 16) & 0xff;
1154 arr[2] = (rlen >> 8) & 0xff;
1155 arr[3] = rlen & 0xff;
1158 * Return the smallest value of either
1159 * - The allocated length
1160 * - The constructed command length
1161 * - The maximum array size
1164 ret = fill_from_dev_buffer(scp, arr,
1165 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1170 /* <<Following mode page info copied from ST318451LW>> */
1172 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1173 { /* Read-Write Error Recovery page for mode_sense */
1174 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1177 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1179 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1180 return sizeof(err_recov_pg);
1183 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1184 { /* Disconnect-Reconnect page for mode_sense */
1185 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1186 0, 0, 0, 0, 0, 0, 0, 0};
1188 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1190 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1191 return sizeof(disconnect_pg);
1194 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1195 { /* Format device page for mode_sense */
1196 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1197 0, 0, 0, 0, 0, 0, 0, 0,
1198 0, 0, 0, 0, 0x40, 0, 0, 0};
1200 memcpy(p, format_pg, sizeof(format_pg));
1201 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1202 p[11] = sdebug_sectors_per & 0xff;
1203 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1204 p[13] = scsi_debug_sector_size & 0xff;
1205 if (DEV_REMOVEABLE(target))
1206 p[20] |= 0x20; /* should agree with INQUIRY */
1208 memset(p + 2, 0, sizeof(format_pg) - 2);
1209 return sizeof(format_pg);
1212 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1213 { /* Caching page for mode_sense */
1214 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1215 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1217 memcpy(p, caching_pg, sizeof(caching_pg));
1219 memset(p + 2, 0, sizeof(caching_pg) - 2);
1220 return sizeof(caching_pg);
1223 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1224 { /* Control mode page for mode_sense */
1225 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1227 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1230 if (scsi_debug_dsense)
1231 ctrl_m_pg[2] |= 0x4;
1233 ctrl_m_pg[2] &= ~0x4;
1236 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1238 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1240 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1241 else if (2 == pcontrol)
1242 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1243 return sizeof(ctrl_m_pg);
1247 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1248 { /* Informational Exceptions control mode page for mode_sense */
1249 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1251 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1254 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1256 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1257 else if (2 == pcontrol)
1258 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1259 return sizeof(iec_m_pg);
1262 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1263 { /* SAS SSP mode page - short format for mode_sense */
1264 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1265 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1267 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1269 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1270 return sizeof(sas_sf_m_pg);
1274 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1276 { /* SAS phy control and discover mode page for mode_sense */
1277 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1278 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1279 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1280 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1281 0x2, 0, 0, 0, 0, 0, 0, 0,
1282 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1283 0, 0, 0, 0, 0, 0, 0, 0,
1284 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1285 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1286 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1287 0x3, 0, 0, 0, 0, 0, 0, 0,
1288 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1289 0, 0, 0, 0, 0, 0, 0, 0,
1293 port_a = target_dev_id + 1;
1294 port_b = port_a + 1;
1295 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1296 p[20] = (port_a >> 24);
1297 p[21] = (port_a >> 16) & 0xff;
1298 p[22] = (port_a >> 8) & 0xff;
1299 p[23] = port_a & 0xff;
1300 p[48 + 20] = (port_b >> 24);
1301 p[48 + 21] = (port_b >> 16) & 0xff;
1302 p[48 + 22] = (port_b >> 8) & 0xff;
1303 p[48 + 23] = port_b & 0xff;
1305 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1306 return sizeof(sas_pcd_m_pg);
1309 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1310 { /* SAS SSP shared protocol specific port mode subpage */
1311 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1312 0, 0, 0, 0, 0, 0, 0, 0,
1315 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1317 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1318 return sizeof(sas_sha_m_pg);
1321 #define SDEBUG_MAX_MSENSE_SZ 256
1323 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1324 struct sdebug_dev_info * devip)
1326 unsigned char dbd, llbaa;
1327 int pcontrol, pcode, subpcode, bd_len;
1328 unsigned char dev_spec;
1329 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1331 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1332 unsigned char *cmd = (unsigned char *)scp->cmnd;
1334 if ((errsts = check_readiness(scp, 1, devip)))
1336 dbd = !!(cmd[1] & 0x8);
1337 pcontrol = (cmd[2] & 0xc0) >> 6;
1338 pcode = cmd[2] & 0x3f;
1340 msense_6 = (MODE_SENSE == cmd[0]);
1341 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1342 if ((0 == scsi_debug_ptype) && (0 == dbd))
1343 bd_len = llbaa ? 16 : 8;
1346 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1347 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1348 if (0x3 == pcontrol) { /* Saving values not supported */
1349 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1351 return check_condition_result;
1353 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1354 (devip->target * 1000) - 3;
1355 /* set DPOFUA bit for disks */
1356 if (0 == scsi_debug_ptype)
1357 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1367 arr[4] = 0x1; /* set LONGLBA bit */
1368 arr[7] = bd_len; /* assume 255 or less */
1372 if ((bd_len > 0) && (!sdebug_capacity))
1373 sdebug_capacity = get_sdebug_capacity();
1376 if (sdebug_capacity > 0xfffffffe) {
1382 ap[0] = (sdebug_capacity >> 24) & 0xff;
1383 ap[1] = (sdebug_capacity >> 16) & 0xff;
1384 ap[2] = (sdebug_capacity >> 8) & 0xff;
1385 ap[3] = sdebug_capacity & 0xff;
1387 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1388 ap[7] = scsi_debug_sector_size & 0xff;
1391 } else if (16 == bd_len) {
1392 unsigned long long capac = sdebug_capacity;
1394 for (k = 0; k < 8; ++k, capac >>= 8)
1395 ap[7 - k] = capac & 0xff;
1396 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1397 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1398 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1399 ap[15] = scsi_debug_sector_size & 0xff;
1404 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1405 /* TODO: Control Extension page */
1406 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1408 return check_condition_result;
1411 case 0x1: /* Read-Write error recovery page, direct access */
1412 len = resp_err_recov_pg(ap, pcontrol, target);
1415 case 0x2: /* Disconnect-Reconnect page, all devices */
1416 len = resp_disconnect_pg(ap, pcontrol, target);
1419 case 0x3: /* Format device page, direct access */
1420 len = resp_format_pg(ap, pcontrol, target);
1423 case 0x8: /* Caching page, direct access */
1424 len = resp_caching_pg(ap, pcontrol, target);
1427 case 0xa: /* Control Mode page, all devices */
1428 len = resp_ctrl_m_pg(ap, pcontrol, target);
1431 case 0x19: /* if spc==1 then sas phy, control+discover */
1432 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1433 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1434 INVALID_FIELD_IN_CDB, 0);
1435 return check_condition_result;
1438 if ((0x0 == subpcode) || (0xff == subpcode))
1439 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1440 if ((0x1 == subpcode) || (0xff == subpcode))
1441 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1443 if ((0x2 == subpcode) || (0xff == subpcode))
1444 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1447 case 0x1c: /* Informational Exceptions Mode page, all devices */
1448 len = resp_iec_m_pg(ap, pcontrol, target);
1451 case 0x3f: /* Read all Mode pages */
1452 if ((0 == subpcode) || (0xff == subpcode)) {
1453 len = resp_err_recov_pg(ap, pcontrol, target);
1454 len += resp_disconnect_pg(ap + len, pcontrol, target);
1455 len += resp_format_pg(ap + len, pcontrol, target);
1456 len += resp_caching_pg(ap + len, pcontrol, target);
1457 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1458 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1459 if (0xff == subpcode) {
1460 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1461 target, target_dev_id);
1462 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1464 len += resp_iec_m_pg(ap + len, pcontrol, target);
1466 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1467 INVALID_FIELD_IN_CDB, 0);
1468 return check_condition_result;
1473 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1475 return check_condition_result;
1478 arr[0] = offset - 1;
1480 arr[0] = ((offset - 2) >> 8) & 0xff;
1481 arr[1] = (offset - 2) & 0xff;
1483 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1486 #define SDEBUG_MAX_MSELECT_SZ 512
1488 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1489 struct sdebug_dev_info * devip)
1491 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1492 int param_len, res, errsts, mpage;
1493 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1494 unsigned char *cmd = (unsigned char *)scp->cmnd;
1496 if ((errsts = check_readiness(scp, 1, devip)))
1498 memset(arr, 0, sizeof(arr));
1501 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1502 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1503 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1504 INVALID_FIELD_IN_CDB, 0);
1505 return check_condition_result;
1507 res = fetch_to_dev_buffer(scp, arr, param_len);
1509 return (DID_ERROR << 16);
1510 else if ((res < param_len) &&
1511 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1512 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1513 " IO sent=%d bytes\n", param_len, res);
1514 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1515 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1517 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1518 INVALID_FIELD_IN_PARAM_LIST, 0);
1519 return check_condition_result;
1521 off = bd_len + (mselect6 ? 4 : 8);
1522 mpage = arr[off] & 0x3f;
1523 ps = !!(arr[off] & 0x80);
1525 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1526 INVALID_FIELD_IN_PARAM_LIST, 0);
1527 return check_condition_result;
1529 spf = !!(arr[off] & 0x40);
1530 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1532 if ((pg_len + off) > param_len) {
1533 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1534 PARAMETER_LIST_LENGTH_ERR, 0);
1535 return check_condition_result;
1538 case 0xa: /* Control Mode page */
1539 if (ctrl_m_pg[1] == arr[off + 1]) {
1540 memcpy(ctrl_m_pg + 2, arr + off + 2,
1541 sizeof(ctrl_m_pg) - 2);
1542 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1546 case 0x1c: /* Informational Exceptions Mode page */
1547 if (iec_m_pg[1] == arr[off + 1]) {
1548 memcpy(iec_m_pg + 2, arr + off + 2,
1549 sizeof(iec_m_pg) - 2);
1556 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1557 INVALID_FIELD_IN_PARAM_LIST, 0);
1558 return check_condition_result;
1561 static int resp_temp_l_pg(unsigned char * arr)
1563 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1564 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1567 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1568 return sizeof(temp_l_pg);
1571 static int resp_ie_l_pg(unsigned char * arr)
1573 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1576 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1577 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1578 arr[4] = THRESHOLD_EXCEEDED;
1581 return sizeof(ie_l_pg);
1584 #define SDEBUG_MAX_LSENSE_SZ 512
1586 static int resp_log_sense(struct scsi_cmnd * scp,
1587 struct sdebug_dev_info * devip)
1589 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1590 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1591 unsigned char *cmd = (unsigned char *)scp->cmnd;
1593 if ((errsts = check_readiness(scp, 1, devip)))
1595 memset(arr, 0, sizeof(arr));
1599 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1600 INVALID_FIELD_IN_CDB, 0);
1601 return check_condition_result;
1603 pcontrol = (cmd[2] & 0xc0) >> 6;
1604 pcode = cmd[2] & 0x3f;
1605 subpcode = cmd[3] & 0xff;
1606 alloc_len = (cmd[7] << 8) + cmd[8];
1608 if (0 == subpcode) {
1610 case 0x0: /* Supported log pages log page */
1612 arr[n++] = 0x0; /* this page */
1613 arr[n++] = 0xd; /* Temperature */
1614 arr[n++] = 0x2f; /* Informational exceptions */
1617 case 0xd: /* Temperature log page */
1618 arr[3] = resp_temp_l_pg(arr + 4);
1620 case 0x2f: /* Informational exceptions log page */
1621 arr[3] = resp_ie_l_pg(arr + 4);
1624 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1625 INVALID_FIELD_IN_CDB, 0);
1626 return check_condition_result;
1628 } else if (0xff == subpcode) {
1632 case 0x0: /* Supported log pages and subpages log page */
1635 arr[n++] = 0x0; /* 0,0 page */
1637 arr[n++] = 0xff; /* this page */
1639 arr[n++] = 0x0; /* Temperature */
1641 arr[n++] = 0x0; /* Informational exceptions */
1644 case 0xd: /* Temperature subpages */
1647 arr[n++] = 0x0; /* Temperature */
1650 case 0x2f: /* Informational exceptions subpages */
1653 arr[n++] = 0x0; /* Informational exceptions */
1657 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1658 INVALID_FIELD_IN_CDB, 0);
1659 return check_condition_result;
1662 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1663 INVALID_FIELD_IN_CDB, 0);
1664 return check_condition_result;
1666 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1667 return fill_from_dev_buffer(scp, arr,
1668 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1671 static int check_device_access_params(struct sdebug_dev_info *devi,
1672 unsigned long long lba, unsigned int num)
1674 if (lba + num > sdebug_capacity) {
1675 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1676 return check_condition_result;
1678 /* transfer length excessive (tie in to block limits VPD page) */
1679 if (num > sdebug_store_sectors) {
1680 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1681 return check_condition_result;
1686 static int do_device_access(struct scsi_cmnd *scmd,
1687 struct sdebug_dev_info *devi,
1688 unsigned long long lba, unsigned int num, int write)
1691 unsigned long long block, rest = 0;
1692 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1694 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1696 block = do_div(lba, sdebug_store_sectors);
1697 if (block + num > sdebug_store_sectors)
1698 rest = block + num - sdebug_store_sectors;
1700 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1701 (num - rest) * scsi_debug_sector_size);
1703 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
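	/*
	 * The RAM store may be smaller than the reported (virtual) capacity,
	 * so an access running past sdebug_store_sectors wraps: the first
	 * (num - rest) blocks are serviced at offset "block" and the
	 * remaining "rest" blocks from the start of fake_storep.
	 */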
1708 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1709 unsigned int sectors, u32 ei_lba)
1711 unsigned int i, resid;
1712 struct scatterlist *psgl;
1713 struct sd_dif_tuple *sdt;
1715 sector_t tmp_sec = start_sec;
1718 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1720 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1722 for (i = 0 ; i < sectors ; i++) {
1725 if (sdt[i].app_tag == 0xffff)
1728 sector = start_sec + i;
1730 switch (scsi_debug_guard) {
1732 csum = ip_compute_csum(fake_storep +
1733 sector * scsi_debug_sector_size,
1734 scsi_debug_sector_size);
1737 csum = crc_t10dif(fake_storep +
1738 sector * scsi_debug_sector_size,
1739 scsi_debug_sector_size);
1740 csum = cpu_to_be16(csum);
1746 if (sdt[i].guard_tag != csum) {
1747 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1748 " rcvd 0x%04x, data 0x%04x\n", __func__,
1749 (unsigned long)sector,
1750 be16_to_cpu(sdt[i].guard_tag),
1756 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1757 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1758 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1759 __func__, (unsigned long)sector);
1764 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1765 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1766 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1767 __func__, (unsigned long)sector);
1775 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
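	/* Each sector carries an 8-byte T10 DIF tuple: a 16-bit guard tag
	 * (CRC or IP checksum per scsi_debug_guard), a 16-bit application tag
	 * and a 32-bit reference tag, i.e. struct sd_dif_tuple above. */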
1778 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1779 int len = min(psgl->length, resid);
1781 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1782 memcpy(paddr, dif_storep + dif_offset(sector), len);
1785 if (sector >= sdebug_store_sectors) {
1788 sector = do_div(tmp_sec, sdebug_store_sectors);
1791 kunmap_atomic(paddr, KM_IRQ0);
1799 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1800 unsigned int num, struct sdebug_dev_info *devip,
1803 unsigned long iflags;
1806 ret = check_device_access_params(devip, lba, num);
1810 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1811 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1812 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1813 /* claim unrecoverable read error */
1814 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1815 /* set info field and valid bit for fixed descriptor */
1816 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1817 devip->sense_buff[0] |= 0x80; /* Valid bit */
1818 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1819 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1820 devip->sense_buff[3] = (ret >> 24) & 0xff;
1821 devip->sense_buff[4] = (ret >> 16) & 0xff;
1822 devip->sense_buff[5] = (ret >> 8) & 0xff;
1823 devip->sense_buff[6] = ret & 0xff;
1825 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1826 return check_condition_result;
1830 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1831 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1834 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1835 return illegal_condition_result;
1839 read_lock_irqsave(&atomic_rw, iflags);
1840 ret = do_device_access(SCpnt, devip, lba, num, 0);
1841 read_unlock_irqrestore(&atomic_rw, iflags);
1845 void dump_sector(unsigned char *buf, int len)
1849 printk(KERN_ERR ">>> Sector Dump <<<\n");
1851 for (i = 0 ; i < len ; i += 16) {
1852 printk(KERN_ERR "%04d: ", i);
1854 for (j = 0 ; j < 16 ; j++) {
1855 unsigned char c = buf[i+j];
1856 if (c >= 0x20 && c < 0x7e)
1857 printk(" %c ", buf[i+j]);
1859 printk("%02x ", buf[i+j]);
1866 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1867 unsigned int sectors, u32 ei_lba)
1870 struct sd_dif_tuple *sdt;
1871 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1872 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1873 void *daddr, *paddr;
1874 sector_t tmp_sec = start_sec;
1877 unsigned short csum;
1879 sector = do_div(tmp_sec, sdebug_store_sectors);
1881 BUG_ON(scsi_sg_count(SCpnt) == 0);
1882 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1884 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1887 /* For each data page */
1888 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1889 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1891 /* For each sector-sized chunk in data page */
1892 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1894 /* If we're at the end of the current
1895 * protection page advance to the next one
1897 if (ppage_offset >= psgl->length) {
1898 kunmap_atomic(paddr, KM_IRQ1);
1899 psgl = sg_next(psgl);
1900 BUG_ON(psgl == NULL);
1901 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1906 sdt = paddr + ppage_offset;
1908 switch (scsi_debug_guard) {
1910 csum = ip_compute_csum(daddr,
1911 scsi_debug_sector_size);
1914 csum = cpu_to_be16(crc_t10dif(daddr,
1915 scsi_debug_sector_size));
1923 if (sdt->guard_tag != csum) {
1925 "%s: GUARD check failed on sector %lu " \
1926 "rcvd 0x%04x, calculated 0x%04x\n",
1927 __func__, (unsigned long)sector,
1928 be16_to_cpu(sdt->guard_tag),
1931 dump_sector(daddr, scsi_debug_sector_size);
1935 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1936 be32_to_cpu(sdt->ref_tag)
1937 != (start_sec & 0xffffffff)) {
1939 "%s: REF check failed on sector %lu\n",
1940 __func__, (unsigned long)sector);
1942 dump_sector(daddr, scsi_debug_sector_size);
1946 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1947 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1949 "%s: REF check failed on sector %lu\n",
1950 __func__, (unsigned long)sector);
1952 dump_sector(daddr, scsi_debug_sector_size);
1956 /* Would be great to copy this in bigger
1957 * chunks. However, for the sake of
1958 * correctness we need to verify each sector
1959 * before writing it to "stable" storage
1961 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1965 if (sector == sdebug_store_sectors)
1966 sector = 0; /* Force wrap */
1970 daddr += scsi_debug_sector_size;
1971 ppage_offset += sizeof(struct sd_dif_tuple);
1974 kunmap_atomic(daddr, KM_IRQ0);
1977 kunmap_atomic(paddr, KM_IRQ1);
1985 kunmap_atomic(daddr, KM_IRQ0);
1986 kunmap_atomic(paddr, KM_IRQ1);
1990 static unsigned int map_state(sector_t lba, unsigned int *num)
1992 unsigned int granularity, alignment, mapped;
1993 sector_t block, next, end;
1995 granularity = scsi_debug_unmap_granularity;
1996 alignment = granularity - scsi_debug_unmap_alignment;
1997 block = lba + alignment;
1998 do_div(block, granularity);
2000 mapped = test_bit(block, map_storep);
2003 next = find_next_zero_bit(map_storep, map_size, block);
2005 next = find_next_bit(map_storep, map_size, block);
2007 end = next * granularity - scsi_debug_unmap_alignment;
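	/*
	 * Sketch of the intent (the arithmetic above is taken as-is): lba is
	 * mapped to a bit in the provisioning bitmap, where each bit covers
	 * one unmap granularity's worth of LBAs; "end" marks where the run of
	 * identically mapped blocks stops, so the caller can report both the
	 * mapped state and the length of that run.
	 */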
2013 static void map_region(sector_t lba, unsigned int len)
2015 unsigned int granularity, alignment;
2016 sector_t end = lba + len;
2018 granularity = scsi_debug_unmap_granularity;
2019 alignment = granularity - scsi_debug_unmap_alignment;
2022 sector_t block, rem;
2024 block = lba + alignment;
2025 rem = do_div(block, granularity);
2027 if (block < map_size)
2028 set_bit(block, map_storep);
2030 lba += granularity - rem;
2034 static void unmap_region(sector_t lba, unsigned int len)
2036 unsigned int granularity, alignment;
2037 sector_t end = lba + len;
2039 granularity = scsi_debug_unmap_granularity;
2040 alignment = granularity - scsi_debug_unmap_alignment;
2043 sector_t block, rem;
2045 block = lba + alignment;
2046 rem = do_div(block, granularity);
2048 if (rem == 0 && lba + granularity < end && block < map_size)
2049 clear_bit(block, map_storep);
2051 lba += granularity - rem;
2055 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2056 unsigned int num, struct sdebug_dev_info *devip,
2059 unsigned long iflags;
2062 ret = check_device_access_params(devip, lba, num);
2067 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2068 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2070 if (prot_ret) {
2071 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2072 return illegal_condition_result;
2073 }
2074 }
2076 write_lock_irqsave(&atomic_rw, iflags);
2077 ret = do_device_access(SCpnt, devip, lba, num, 1);
2078 if (scsi_debug_unmap_granularity)
2079 map_region(lba, num);
2080 write_unlock_irqrestore(&atomic_rw, iflags);
2081 if (-1 == ret)
2082 return (DID_ERROR << 16);
2083 else if ((ret < (num * scsi_debug_sector_size)) &&
2084 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2085 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2086 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2087 return 0;
2088 }
2091 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2092 unsigned int num, struct sdebug_dev_info *devip,
2093 u32 ei_lba, unsigned int unmap)
2095 unsigned long iflags;
2096 unsigned long long i;
2097 int ret;
2099 ret = check_device_access_params(devip, lba, num);
2100 if (ret)
2101 return ret;
2103 if (num > scsi_debug_write_same_length) {
2104 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2105 0);
2106 return check_condition_result;
2107 }
2109 write_lock_irqsave(&atomic_rw, iflags);
2111 if (unmap && scsi_debug_unmap_granularity) {
2112 unmap_region(lba, num);
2113 goto out;
2114 }
2116 /* Else fetch one logical block */
2117 ret = fetch_to_dev_buffer(scmd,
2118 fake_storep + (lba * scsi_debug_sector_size),
2119 scsi_debug_sector_size);
2121 if (-1 == ret) {
2122 write_unlock_irqrestore(&atomic_rw, iflags);
2123 return (DID_ERROR << 16);
2124 } else if ((ret < (num * scsi_debug_sector_size)) &&
2125 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2126 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2127 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2129 /* Copy first sector to remaining blocks */
2130 for (i = 1 ; i < num ; i++)
2131 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2132 fake_storep + (lba * scsi_debug_sector_size),
2133 scsi_debug_sector_size);
2135 if (scsi_debug_unmap_granularity)
2136 map_region(lba, num);
2137 out:
2138 write_unlock_irqrestore(&atomic_rw, iflags);
2140 return 0;
2141 }
2143 struct unmap_block_desc {
2144 __be64 lba;
2145 __be32 blocks;
2146 __be32 __reserved;
2147 };
2149 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2151 unsigned char *buf;
2152 struct unmap_block_desc *desc;
2153 unsigned int i, payload_len, descriptors;
2154 int ret;
2156 ret = check_readiness(scmd, 1, devip);
2157 if (ret)
2158 return ret;
2160 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2161 BUG_ON(scsi_bufflen(scmd) != payload_len);
2163 descriptors = (payload_len - 8) / 16;
2165 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2166 if (!buf)
2167 return check_condition_result;
2169 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2171 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2172 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2174 desc = (void *)&buf[8];
2176 for (i = 0 ; i < descriptors ; i++) {
2177 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2178 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2180 ret = check_device_access_params(devip, lba, num);
2181 if (ret)
2182 goto out;
2184 unmap_region(lba, num);
2185 }
2187 ret = 0;
2189 out:
2190 kfree(buf);
2192 return ret;
2193 }
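/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * UNMAP parameter list that resp_unmap() above parses - an 8 byte header
 * followed by one or more 16 byte block descriptors. This builds a list
 * with a single descriptor; the helper name is hypothetical.
 */
static void sdebug_example_build_unmap(unsigned char *buf,
				       unsigned long long lba,
				       unsigned int blocks)
{
	memset(buf, 0, 24);
	put_unaligned_be16(24 - 2, &buf[0]);	/* UNMAP data length */
	put_unaligned_be16(16, &buf[2]);	/* block descriptor data length */
	put_unaligned_be64(lba, &buf[8]);	/* descriptor: starting LBA */
	put_unaligned_be32(blocks, &buf[16]);	/* descriptor: number of blocks */
}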
2195 #define SDEBUG_GET_LBA_STATUS_LEN 32
2197 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2198 struct sdebug_dev_info * devip)
2200 unsigned long long lba;
2201 unsigned int alloc_len, mapped, num;
2202 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2205 ret = check_readiness(scmd, 1, devip);
2209 lba = get_unaligned_be64(&scmd->cmnd[2]);
2210 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2215 ret = check_device_access_params(devip, lba, 1);
2219 mapped = map_state(lba, &num);
2221 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2222 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2223 put_unaligned_be64(lba, &arr[8]); /* LBA */
2224 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2225 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2227 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2230 #define SDEBUG_RLUN_ARR_SZ 256
2232 static int resp_report_luns(struct scsi_cmnd * scp,
2233 struct sdebug_dev_info * devip)
2235 unsigned int alloc_len;
2236 int lun_cnt, i, upper, num, n, wlun, lun;
2237 unsigned char *cmd = (unsigned char *)scp->cmnd;
2238 int select_report = (int)cmd[2];
2239 struct scsi_lun *one_lun;
2240 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2241 unsigned char * max_addr;
2243 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2244 if ((alloc_len < 4) || (select_report > 2)) {
2245 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2246 0);
2247 return check_condition_result;
2248 }
2249 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2250 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2251 lun_cnt = scsi_debug_max_luns;
2252 if (1 == select_report)
2253 lun_cnt = 0;
2254 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2255 lun_cnt--;
2256 wlun = (select_report > 0) ? 1 : 0;
2257 num = lun_cnt + wlun;
2258 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2259 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2260 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2261 sizeof(struct scsi_lun)), num);
2266 one_lun = (struct scsi_lun *) &arr[8];
2267 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2268 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2269 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2270 i++, lun++) {
2271 upper = (lun >> 8) & 0x3f;
2272 if (upper)
2273 one_lun[i].scsi_lun[0] =
2274 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2275 one_lun[i].scsi_lun[1] = lun & 0xff;
2276 }
2277 if (wlun) {
2278 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2279 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2280 i++;
2281 }
2282 alloc_len = (unsigned char *)(one_lun + i) - arr;
2283 return fill_from_dev_buffer(scp, arr,
2284 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2285 }
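/*
 * Illustrative sketch (added for clarity, not part of the driver): the LUN
 * encoding used by resp_report_luns() above. LUNs below 256 are reported
 * with byte 0 left at zero; larger LUNs put the upper bits in byte 0
 * together with the SAM-2 flat-space address method. The helper name is
 * hypothetical.
 */
static void sdebug_example_encode_lun(struct scsi_lun *slp, int lun)
{
	int upper = (lun >> 8) & 0x3f;

	memset(slp, 0, sizeof(*slp));
	if (upper)
		slp->scsi_lun[0] = upper | (SAM2_LUN_ADDRESS_METHOD << 6);
	slp->scsi_lun[1] = lun & 0xff;
}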
2287 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2288 unsigned int num, struct sdebug_dev_info *devip)
2289 {
2290 int i, j, ret = -1;
2291 unsigned char *kaddr, *buf;
2292 unsigned int offset;
2293 struct scatterlist *sg;
2294 struct scsi_data_buffer *sdb = scsi_in(scp);
2296 /* better not to use temporary buffer. */
2297 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2298 if (!buf)
2299 return ret;
2301 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2303 offset = 0;
2304 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2305 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2309 for (j = 0; j < sg->length; j++)
2310 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2312 offset += sg->length;
2313 kunmap_atomic(kaddr, KM_USER0);
2322 /* When timer goes off this function is called. */
2323 static void timer_intr_handler(unsigned long indx)
2325 struct sdebug_queued_cmd * sqcp;
2326 unsigned long iflags;
2328 if (indx >= scsi_debug_max_queue) {
2329 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2333 spin_lock_irqsave(&queued_arr_lock, iflags);
2334 sqcp = &queued_arr[(int)indx];
2335 if (! sqcp->in_use) {
2336 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2338 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2342 if (sqcp->done_funct) {
2343 sqcp->a_cmnd->result = sqcp->scsi_result;
2344 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2346 sqcp->done_funct = NULL;
2347 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2351 static struct sdebug_dev_info *
2352 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2354 struct sdebug_dev_info *devip;
2356 devip = kzalloc(sizeof(*devip), flags);
2357 if (devip) {
2358 devip->sdbg_host = sdbg_host;
2359 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2360 }
2361 return devip;
2362 }
2364 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2366 struct sdebug_host_info * sdbg_host;
2367 struct sdebug_dev_info * open_devip = NULL;
2368 struct sdebug_dev_info * devip =
2369 (struct sdebug_dev_info *)sdev->hostdata;
2371 if (devip)
2372 return devip;
2373 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2374 if (!sdbg_host) {
2375 printk(KERN_ERR "Host info NULL\n");
2376 return NULL;
2377 }
2378 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2379 if ((devip->used) && (devip->channel == sdev->channel) &&
2380 (devip->target == sdev->id) &&
2381 (devip->lun == sdev->lun))
2384 if ((!devip->used) && (!open_devip))
2388 if (!open_devip) { /* try and make a new one */
2389 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2390 if (!open_devip) {
2391 printk(KERN_ERR "%s: out of memory at line %d\n",
2392 __func__, __LINE__);
2393 return NULL;
2394 }
2395 }
2397 open_devip->channel = sdev->channel;
2398 open_devip->target = sdev->id;
2399 open_devip->lun = sdev->lun;
2400 open_devip->sdbg_host = sdbg_host;
2401 open_devip->reset = 1;
2402 open_devip->used = 1;
2403 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2404 if (scsi_debug_dsense)
2405 open_devip->sense_buff[0] = 0x72;
2407 open_devip->sense_buff[0] = 0x70;
2408 open_devip->sense_buff[7] = 0xa;
2410 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2411 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2413 return open_devip;
2414 }
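/*
 * Illustrative sketch (added for clarity, not part of the driver): how the
 * sense buffer pre-formatted above (0x72 descriptor format or 0x70 fixed
 * format, additional length 0xa) is later filled in for a CHECK CONDITION.
 * In fixed format the key/ASC/ASCQ live in bytes 2, 12 and 13; in
 * descriptor format they live in bytes 1, 2 and 3. The helper name is
 * hypothetical.
 */
static void sdebug_example_fill_sense(unsigned char *sbuff, int key,
				      int asc, int asq)
{
	if (scsi_debug_dsense) {	/* descriptor format */
		sbuff[1] = key;
		sbuff[2] = asc;
		sbuff[3] = asq;
	} else {			/* fixed format */
		sbuff[2] = key;
		sbuff[12] = asc;
		sbuff[13] = asq;
	}
}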
2416 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2418 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2419 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2420 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2421 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2425 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2427 struct sdebug_dev_info *devip;
2429 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2430 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2431 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2432 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2433 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2434 devip = devInfoReg(sdp);
2435 if (NULL == devip)
2436 return 1; /* no resources, will be marked offline */
2437 sdp->hostdata = devip;
2438 if (sdp->host->cmd_per_lun)
2439 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2440 sdp->host->cmd_per_lun);
2441 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2442 if (scsi_debug_no_uld)
2443 sdp->no_uld_attach = 1;
2447 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2449 struct sdebug_dev_info *devip =
2450 (struct sdebug_dev_info *)sdp->hostdata;
2452 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2453 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2454 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2455 if (devip) {
2456 /* make this slot available for re-use */
2457 devip->used = 0;
2458 sdp->hostdata = NULL;
2459 }
2460 }
2462 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2463 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2465 unsigned long iflags;
2467 struct sdebug_queued_cmd *sqcp;
2469 spin_lock_irqsave(&queued_arr_lock, iflags);
2470 for (k = 0; k < scsi_debug_max_queue; ++k) {
2471 sqcp = &queued_arr[k];
2472 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2473 del_timer_sync(&sqcp->cmnd_timer);
2474 sqcp->in_use = 0;
2475 sqcp->a_cmnd = NULL;
2476 break;
2477 }
2478 }
2479 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2480 return (k < scsi_debug_max_queue) ? 1 : 0;
2483 /* Deletes (stops) timers of all queued commands */
2484 static void stop_all_queued(void)
2486 unsigned long iflags;
2488 struct sdebug_queued_cmd *sqcp;
2490 spin_lock_irqsave(&queued_arr_lock, iflags);
2491 for (k = 0; k < scsi_debug_max_queue; ++k) {
2492 sqcp = &queued_arr[k];
2493 if (sqcp->in_use && sqcp->a_cmnd) {
2494 del_timer_sync(&sqcp->cmnd_timer);
2496 sqcp->a_cmnd = NULL;
2499 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2502 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2504 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2505 printk(KERN_INFO "scsi_debug: abort\n");
2507 stop_queued_cmnd(SCpnt);
2511 static int scsi_debug_biosparam(struct scsi_device *sdev,
2512 struct block_device * bdev, sector_t capacity, int *info)
2517 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2518 printk(KERN_INFO "scsi_debug: biosparam\n");
2519 buf = scsi_bios_ptable(bdev);
2520 if (buf) {
2521 res = scsi_partsize(buf, capacity,
2522 &info[2], &info[0], &info[1]);
2523 kfree(buf);
2524 if (res >= 0)
2525 return res;
2526 }
2527 info[0] = sdebug_heads;
2528 info[1] = sdebug_sectors_per;
2529 info[2] = sdebug_cylinders_per;
2533 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2535 struct sdebug_dev_info * devip;
2537 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2538 printk(KERN_INFO "scsi_debug: device_reset\n");
2541 devip = devInfoReg(SCpnt->device);
2548 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2550 struct sdebug_host_info *sdbg_host;
2551 struct sdebug_dev_info * dev_info;
2552 struct scsi_device * sdp;
2553 struct Scsi_Host * hp;
2555 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2556 printk(KERN_INFO "scsi_debug: bus_reset\n");
2558 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2559 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2561 list_for_each_entry(dev_info,
2562 &sdbg_host->dev_info_list,
2563 dev_list)
2564 dev_info->reset = 1;
2570 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2572 struct sdebug_host_info * sdbg_host;
2573 struct sdebug_dev_info * dev_info;
2575 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2576 printk(KERN_INFO "scsi_debug: host_reset\n");
2578 spin_lock(&sdebug_host_list_lock);
2579 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2580 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2581 dev_list)
2582 dev_info->reset = 1;
2584 spin_unlock(&sdebug_host_list_lock);
2589 /* Initializes timers in queued array */
2590 static void __init init_all_queued(void)
2592 unsigned long iflags;
2594 struct sdebug_queued_cmd * sqcp;
2596 spin_lock_irqsave(&queued_arr_lock, iflags);
2597 for (k = 0; k < scsi_debug_max_queue; ++k) {
2598 sqcp = &queued_arr[k];
2599 init_timer(&sqcp->cmnd_timer);
2601 sqcp->a_cmnd = NULL;
2603 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2606 static void __init sdebug_build_parts(unsigned char *ramp,
2607 unsigned long store_size)
2609 struct partition * pp;
2610 int starts[SDEBUG_MAX_PARTS + 2];
2611 int sectors_per_part, num_sectors, k;
2612 int heads_by_sects, start_sec, end_sec;
2614 /* assume partition table already zeroed */
2615 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2616 return;
2617 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2618 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2619 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2620 "partitions to %d\n", SDEBUG_MAX_PARTS);
2622 num_sectors = (int)sdebug_store_sectors;
2623 sectors_per_part = (num_sectors - sdebug_sectors_per)
2624 / scsi_debug_num_parts;
2625 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2626 starts[0] = sdebug_sectors_per;
2627 for (k = 1; k < scsi_debug_num_parts; ++k)
2628 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2629 * heads_by_sects;
2630 starts[scsi_debug_num_parts] = num_sectors;
2631 starts[scsi_debug_num_parts + 1] = 0;
2633 ramp[510] = 0x55; /* magic partition markings */
2634 ramp[511] = 0xAA;
2635 pp = (struct partition *)(ramp + 0x1be);
2636 for (k = 0; starts[k + 1]; ++k, ++pp) {
2637 start_sec = starts[k];
2638 end_sec = starts[k + 1] - 1;
2641 pp->cyl = start_sec / heads_by_sects;
2642 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2643 / sdebug_sectors_per;
2644 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2646 pp->end_cyl = end_sec / heads_by_sects;
2647 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2648 / sdebug_sectors_per;
2649 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2651 pp->start_sect = start_sec;
2652 pp->nr_sects = end_sec - start_sec + 1;
2653 pp->sys_ind = 0x83; /* plain Linux partition */
2654 }
2655 }
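/*
 * Illustrative sketch (added for clarity, not part of the driver): the
 * linear-sector to cylinder/head/sector conversion sdebug_build_parts()
 * above uses for each partition entry, based on the sdebug_heads and
 * sdebug_sectors_per geometry globals. The helper name and out-parameters
 * are hypothetical.
 */
static void sdebug_example_lba_to_chs(int lin_sec, int *cyl, int *head,
				      int *sect)
{
	int heads_by_sects = sdebug_heads * sdebug_sectors_per;

	*cyl = lin_sec / heads_by_sects;
	*head = (lin_sec - (*cyl * heads_by_sects)) / sdebug_sectors_per;
	*sect = (lin_sec % sdebug_sectors_per) + 1;	/* sectors are 1-based */
}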
2657 static int schedule_resp(struct scsi_cmnd * cmnd,
2658 struct sdebug_dev_info * devip,
2659 done_funct_t done, int scsi_result, int delta_jiff)
2661 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2662 if (scsi_result) {
2663 struct scsi_device * sdp = cmnd->device;
2665 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2666 "non-zero result=0x%x\n", sdp->host->host_no,
2667 sdp->channel, sdp->id, sdp->lun, scsi_result);
2670 if (cmnd && devip) {
2671 /* simulate autosense by this driver */
2672 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2673 memcpy(cmnd->sense_buffer, devip->sense_buff,
2674 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2675 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2677 if (delta_jiff <= 0) {
2678 if (cmnd)
2679 cmnd->result = scsi_result;
2680 if (done)
2681 done(cmnd);
2682 return 0;
2683 } else {
2684 unsigned long iflags;
2686 struct sdebug_queued_cmd * sqcp = NULL;
2688 spin_lock_irqsave(&queued_arr_lock, iflags);
2689 for (k = 0; k < scsi_debug_max_queue; ++k) {
2690 sqcp = &queued_arr[k];
2691 if (!sqcp->in_use)
2692 break;
2693 }
2694 if (k >= scsi_debug_max_queue) {
2695 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2696 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2697 return 1; /* report busy to mid level */
2698 }
2699 sqcp->in_use = 1;
2700 sqcp->a_cmnd = cmnd;
2701 sqcp->scsi_result = scsi_result;
2702 sqcp->done_funct = done;
2703 sqcp->cmnd_timer.function = timer_intr_handler;
2704 sqcp->cmnd_timer.data = k;
2705 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2706 add_timer(&sqcp->cmnd_timer);
2707 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2708 }
2709 return 0;
2710 }
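/*
 * Illustrative sketch (added for clarity, not part of the driver): how
 * schedule_resp() above arms the per-slot timer with the classic
 * timer_list API. The queue slot index travels in ->data and comes back
 * as the argument of timer_intr_handler() once ->expires is reached; the
 * timer itself was set up by init_all_queued(). The helper name is
 * hypothetical.
 */
static void sdebug_example_arm_timer(struct timer_list *tp,
				     unsigned long slot, int delta_jiff)
{
	tp->function = timer_intr_handler;	/* fires in timer context */
	tp->data = slot;			/* index into queued_arr[] */
	tp->expires = jiffies + delta_jiff;
	add_timer(tp);
}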
2713 /* Note: The following macros create attribute files in the
2714 /sys/module/scsi_debug/parameters directory. Unfortunately this
2715 driver is not notified when a parameter is changed through that
2716 directory, so it cannot trigger the auxiliary actions it performs
2717 when the corresponding attribute in the
2718 /sys/bus/pseudo/drivers/scsi_debug directory is changed. */
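/* Usage note (added): the same value can therefore be reached by two
 * paths, e.g. for the "opts" flag word:
 *
 *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts       driver attribute,
 *                                                          side effects run
 *   echo 1 > /sys/module/scsi_debug/parameters/opts        raw parameter,
 *                                                          no side effects
 *
 * The value 1 (SCSI_DEBUG_OPT_NOISE) is only an example; parameters are
 * writable here only when declared with S_IWUSR below.
 */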
2719 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2720 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2721 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2722 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2723 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2724 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2725 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2726 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2727 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2728 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2729 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2730 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2731 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2732 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2733 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2734 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2735 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2736 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2737 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2738 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2739 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2740 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2741 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2742 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2743 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2744 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2745 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2746 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2747 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2748 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2749 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2750 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2751 S_IRUGO | S_IWUSR);
2752 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2753 S_IRUGO | S_IWUSR);
2755 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2756 MODULE_DESCRIPTION("SCSI debug adapter driver");
2757 MODULE_LICENSE("GPL");
2758 MODULE_VERSION(SCSI_DEBUG_VERSION);
2760 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2761 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2762 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2763 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2764 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2765 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2766 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2767 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2768 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2769 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2770 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2771 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2772 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2773 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2774 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2775 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2776 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2777 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2778 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2779 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2780 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2781 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2782 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2783 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2784 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2785 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2786 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2787 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2788 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2789 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2790 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2791 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2792 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2794 static char sdebug_info[256];
2796 static const char * scsi_debug_info(struct Scsi_Host * shp)
2798 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2799 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2800 scsi_debug_version_date, scsi_debug_dev_size_mb,
2805 /* scsi_debug_proc_info
2806 * Used when the driver has no dedicated /proc/scsi support of its own.
2807 */
2808 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2809 int length, int inout)
2811 int len, pos, begin;
2812 int orig_length;
2814 orig_length = length;
2816 if (inout) { /* have data written to proc buffer */
2817 char arr[16];
2818 int minLen = length > 15 ? 15 : length;
2820 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2821 return -EACCES;
2822 memcpy(arr, buffer, minLen);
2823 arr[minLen] = '\0';
2824 if (1 != sscanf(arr, "%d", &pos))
2825 return -EINVAL;
2826 scsi_debug_opts = pos;
2827 if (scsi_debug_every_nth != 0)
2828 scsi_debug_cmnd_count = 0;
2829 return length;
2830 }
2832 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2834 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2835 "every_nth=%d(curr:%d)\n"
2836 "delay=%d, max_luns=%d, scsi_level=%d\n"
2837 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2838 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2839 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2840 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2841 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2842 scsi_debug_cmnd_count, scsi_debug_delay,
2843 scsi_debug_max_luns, scsi_debug_scsi_level,
2844 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2845 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2846 num_host_resets, dix_reads, dix_writes, dif_errors);
2847 if (pos < offset) {
2848 len = 0;
2849 begin = pos;
2850 }
2851 *start = buffer + (offset - begin); /* Start of wanted data */
2852 len -= (offset - begin);
2853 if (len > length)
2854 len = length;
2855 return len;
2856 }
2858 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2860 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2863 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2864 const char * buf, size_t count)
2865 {
2866 int delay;
2867 char work[20];
2869 if (1 == sscanf(buf, "%10s", work)) {
2870 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2871 scsi_debug_delay = delay;
2872 return count;
2873 }
2874 }
2875 return -EINVAL;
2876 }
2877 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2878 sdebug_delay_store);
2880 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2882 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2885 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2886 const char * buf, size_t count)
2891 if (1 == sscanf(buf, "%10s", work)) {
2892 if (0 == strnicmp(work,"0x", 2)) {
2893 if (1 == sscanf(&work[2], "%x", &opts))
2896 if (1 == sscanf(work, "%d", &opts))
2902 scsi_debug_opts = opts;
2903 scsi_debug_cmnd_count = 0;
2906 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2909 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2911 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2913 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2914 const char * buf, size_t count)
2918 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2919 scsi_debug_ptype = n;
2924 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2926 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2928 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2930 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2931 const char * buf, size_t count)
2935 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2936 scsi_debug_dsense = n;
2941 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2942 sdebug_dsense_store);
2944 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2946 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2948 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2949 const char * buf, size_t count)
2953 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2954 scsi_debug_fake_rw = n;
2959 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2960 sdebug_fake_rw_store);
2962 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2964 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2966 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2967 const char * buf, size_t count)
2971 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2972 scsi_debug_no_lun_0 = n;
2977 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2978 sdebug_no_lun_0_store);
2980 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2982 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2984 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2985 const char * buf, size_t count)
2989 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2990 scsi_debug_num_tgts = n;
2991 sdebug_max_tgts_luns();
2996 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2997 sdebug_num_tgts_store);
2999 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3001 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3003 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3005 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3007 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3009 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3011 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3013 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3015 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3016 const char * buf, size_t count)
3020 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3021 scsi_debug_every_nth = nth;
3022 scsi_debug_cmnd_count = 0;
3027 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3028 sdebug_every_nth_store);
3030 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3032 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3034 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3035 const char * buf, size_t count)
3039 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3040 scsi_debug_max_luns = n;
3041 sdebug_max_tgts_luns();
3046 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3047 sdebug_max_luns_store);
3049 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3051 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3053 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3054 const char * buf, size_t count)
3058 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3059 (n <= SCSI_DEBUG_CANQUEUE)) {
3060 scsi_debug_max_queue = n;