[SCSI] sd: WRITE SAME(16) / UNMAP support
author Martin K. Petersen <martin.petersen@oracle.com>
Thu, 26 Nov 2009 17:00:40 +0000 (12:00 -0500)
committer James Bottomley <James.Bottomley@suse.de>
Thu, 10 Dec 2009 14:54:15 +0000 (08:54 -0600)
Implement a function for handling discard requests that sends either
WRITE SAME(16) or UNMAP(10) depending on parameters indicated by the
device in the block limits VPD.
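
As a rough, hedged illustration (not part of the patch): once sd flags the queue with QUEUE_FLAG_DISCARD, userspace can drive this path with the BLKDISCARD ioctl, which the block layer turns into the discard request that sd_prepare_discard() converts into UNMAP or WRITE SAME(16). The device node and byte range below are made up for the example.

  /* Hedged example, not part of the patch: discard the first 1 MiB of a
   * hypothetical thinly provisioned disk via BLKDISCARD. */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/fs.h>                           /* BLKDISCARD */

  int main(void)
  {
          uint64_t range[2] = { 0, 1024 * 1024 }; /* byte offset, byte length */
          int fd = open("/dev/sdb", O_WRONLY);    /* hypothetical thin LUN */

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          if (ioctl(fd, BLKDISCARD, range) < 0)
                  perror("BLKDISCARD");
          close(fd);
          return 0;
  }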

Extract unmap constraints and report them to the block layer.
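
For reference, a hedged userspace-style sketch of the Block Limits VPD (page B0h) fields the patch extracts; the byte offsets mirror sd_read_block_limits() below, while the struct and helper names are invented for illustration.

  /* Hedged sketch, not part of the patch: decode the Block Limits VPD
   * fields that sd_read_block_limits() feeds into the queue limits. */
  #include <stdint.h>

  struct unmap_limits {
          uint32_t max_unmap_lba_count;           /* bytes 20..23 */
          uint32_t max_unmap_descriptor_count;    /* bytes 24..27 */
          uint32_t optimal_unmap_granularity;     /* bytes 28..31, in logical blocks */
          uint32_t unmap_granularity_alignment;   /* bytes 32..35, top bit masked off */
          int      alignment_valid;               /* bit 7 of byte 32 */
  };

  static uint32_t get_be32(const uint8_t *p)
  {
          return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                 ((uint32_t)p[2] << 8)  | p[3];
  }

  static void decode_block_limits(const uint8_t *vpd, struct unmap_limits *l)
  {
          l->max_unmap_lba_count         = get_be32(&vpd[20]);
          l->max_unmap_descriptor_count  = get_be32(&vpd[24]);
          l->optimal_unmap_granularity   = get_be32(&vpd[28]);
          l->alignment_valid             = !!(vpd[32] & 0x80);
          l->unmap_granularity_alignment = get_be32(&vpd[32]) & ~(1u << 31);
  }

In the patch, a non-zero maximum unmap block descriptor count is what sets sdkp->unmap and makes sd_prepare_discard() favour UNMAP over WRITE SAME(16); the optimal unmap granularity and granularity alignment feed discard_granularity and discard_alignment in the queue limits.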

Based in part on a patch by Christoph Hellwig <hch@lst.de>.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
drivers/scsi/sd.c
drivers/scsi/sd.h

diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c72..255da53 100644
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, 20, "%u\n", sdkp->ATO);
 }
 
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+       return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
 static struct device_attribute sd_disk_attrs[] = {
        __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
               sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
               sd_store_manage_start_stop),
        __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
        __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+       __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
        __ATTR_NULL,
 };
 
@@ -398,6 +408,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
        scsi_set_prot_type(scmd, dif);
 }
 
+/**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on preference
+ * indicated by target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+       struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+       struct bio *bio = rq->bio;
+       sector_t sector = bio->bi_sector;
+       unsigned int num = bio_sectors(bio);
+
+       if (sdkp->device->sector_size == 4096) {
+               sector >>= 3;
+               num >>= 3;
+       }
+
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->timeout = SD_TIMEOUT;
+
+       memset(rq->cmd, 0, rq->cmd_len);
+
+       if (sdkp->unmap) {
+               char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+               rq->cmd[0] = UNMAP;
+               rq->cmd[8] = 24;
+               rq->cmd_len = 10;
+
+               /* Ensure that data length matches payload */
+               rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+               put_unaligned_be16(6 + 16, &buf[0]);
+               put_unaligned_be16(16, &buf[2]);
+               put_unaligned_be64(sector, &buf[8]);
+               put_unaligned_be32(num, &buf[16]);
+
+               kunmap_atomic(buf, KM_USER0);
+       } else {
+               rq->cmd[0] = WRITE_SAME_16;
+               rq->cmd[1] = 0x8; /* UNMAP */
+               put_unaligned_be64(sector, &rq->cmd[2]);
+               put_unaligned_be32(num, &rq->cmd[10]);
+               rq->cmd_len = 16;
+       }
+
+       return BLKPREP_OK;
+}
+
 /**
  *     sd_init_command - build a scsi (read or write) command from
  *     information in the request structure.
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
        int ret, host_dif;
        unsigned char protect;
 
+       /*
+        * Discard requests come in as REQ_TYPE_FS but we turn them into
+        * block PC requests to make life easier.
+        */
+       if (blk_discard_rq(rq))
+               ret = sd_prepare_discard(rq);
+
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                ret = scsi_setup_blk_pc_cmnd(sdp, rq);
                goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                sd_printk(KERN_NOTICE, sdkp,
                          "physical block alignment offset: %u\n", alignment);
 
+       if (buffer[14] & 0x80) { /* TPE */
+               struct request_queue *q = sdp->request_queue;
+
+               sdkp->thin_provisioning = 1;
+               q->limits.discard_granularity = sdkp->hw_sector_size;
+               q->limits.max_discard_sectors = 0xffffffff;
+
+               if (buffer[14] & 0x40) /* TPRZ */
+                       q->limits.discard_zeroes_data = 1;
+
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+       }
+
        sdkp->capacity = lba + 1;
        return sector_size;
 }
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  */
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
+       struct request_queue *q = sdkp->disk->queue;
        unsigned int sector_sz = sdkp->device->sector_size;
        char *buffer;
 
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
        blk_queue_io_opt(sdkp->disk->queue,
                         get_unaligned_be32(&buffer[12]) * sector_sz);
 
+       /* Thin provisioning enabled and page length indicates TP support */
+       if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+               unsigned int lba_count, desc_count, granularity;
+
+               lba_count = get_unaligned_be32(&buffer[20]);
+               desc_count = get_unaligned_be32(&buffer[24]);
+
+               if (lba_count) {
+                       q->limits.max_discard_sectors =
+                               lba_count * sector_sz >> 9;
+
+                       if (desc_count)
+                               sdkp->unmap = 1;
+               }
+
+               granularity = get_unaligned_be32(&buffer[28]);
+
+               if (granularity)
+                       q->limits.discard_granularity = granularity * sector_sz;
+
+               if (buffer[32] & 0x80)
+                       q->limits.discard_alignment =
+                               get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+       }
+
        kfree(buffer);
 }
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804..43d3caf 100644
@@ -60,6 +60,8 @@ struct scsi_disk {
        unsigned        RCD : 1;        /* state of disk RCD bit, unused */
        unsigned        DPOFUA : 1;     /* state of disk DPOFUA bit */
        unsigned        first_scan : 1;
+       unsigned        thin_provisioning : 1;
+       unsigned        unmap : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 