Merge tag 'v3.15-rc1' into for-3.16/core
author Jens Axboe <axboe@fb.com>
Tue, 15 Apr 2014 20:02:24 +0000 (14:02 -0600)
committer Jens Axboe <axboe@fb.com>
Tue, 15 Apr 2014 20:02:24 +0000 (14:02 -0600)
We don't like this, but things have diverged with the blk-mq fixes
in 3.15-rc1. So merge it in.

1  2 
block/blk-core.c
drivers/scsi/scsi_lib.c
include/linux/blkdev.h

diff --combined block/blk-core.c
@@@ -1307,7 -1307,7 +1307,7 @@@ void __blk_put_request(struct request_q
                struct request_list *rl = blk_rq_rl(req);
  
                BUG_ON(!list_empty(&req->queuelist));
-               BUG_ON(!hlist_unhashed(&req->hash));
+               BUG_ON(ELV_ON_HASH(req));
  
                blk_free_request(rl, req);
                freed_request(rl, flags);
@@@ -1654,7 -1654,7 +1654,7 @@@ static int __init fail_make_request_deb
        struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
                                                NULL, &fail_make_request);
  
 -      return IS_ERR(dir) ? PTR_ERR(dir) : 0;
 +      return PTR_ERR_OR_ZERO(dir);
  }
  
  late_initcall(fail_make_request_debugfs);
@@@ -2904,26 -2904,19 +2904,26 @@@ free_and_out
  }
  EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
  
 -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 +int kblockd_schedule_work(struct work_struct *work)
  {
        return queue_work(kblockd_workqueue, work);
  }
  EXPORT_SYMBOL(kblockd_schedule_work);
  
 -int kblockd_schedule_delayed_work(struct request_queue *q,
 -                      struct delayed_work *dwork, unsigned long delay)
 +int kblockd_schedule_delayed_work(struct delayed_work *dwork,
 +                                unsigned long delay)
  {
        return queue_delayed_work(kblockd_workqueue, dwork, delay);
  }
  EXPORT_SYMBOL(kblockd_schedule_delayed_work);
  
 +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
 +                                   unsigned long delay)
 +{
 +      return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
 +}
 +EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
 +
  #define PLUG_MAGIC    0x91827364
  
  /**
diff --combined drivers/scsi/scsi_lib.c
@@@ -139,7 -139,7 +139,7 @@@ static void __scsi_queue_insert(struct 
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
 -      kblockd_schedule_work(q, &device->requeue_work);
 +      kblockd_schedule_work(&device->requeue_work);
        spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
@@@ -184,7 -184,7 +184,7 @@@ void scsi_queue_insert(struct scsi_cmn
   */
  int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
-                unsigned char *sense, int timeout, int retries, int flags,
+                unsigned char *sense, int timeout, int retries, u64 flags,
                 int *resid)
  {
        struct request *req;
@@@ -235,7 -235,7 +235,7 @@@ EXPORT_SYMBOL(scsi_execute)
  int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                    int *resid, int flags)
+                    int *resid, u64 flags)
  {
        char *sense = NULL;
        int result;
diff --combined include/linux/blkdev.h
@@@ -118,7 -118,18 +118,18 @@@ struct request 
        struct bio *bio;
        struct bio *biotail;
  
-       struct hlist_node hash; /* merge hash */
+       /*
+        * The hash is used inside the scheduler, and killed once the
+        * request reaches the dispatch list. The ipi_list is only used
+        * to queue the request for softirq completion, which is long
+        * after the request has been unhashed (and even removed from
+        * the dispatch list).
+        */
+       union {
+               struct hlist_node hash; /* merge hash */
+               struct list_head ipi_list;
+       };
        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. So let the
@@@ -824,8 -835,8 +835,8 @@@ extern int blk_rq_map_user(struct reque
  extern int blk_rq_unmap_user(struct bio *);
  extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
  extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-                              struct rq_map_data *, struct sg_iovec *, int,
-                              unsigned int, gfp_t);
+                              struct rq_map_data *, const struct sg_iovec *,
+                              int, unsigned int, gfp_t);
  extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                          struct request *, int);
  extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
@@@ -1359,9 -1370,8 +1370,9 @@@ static inline void put_dev_sector(Secto
  }
  
  struct work_struct;
 -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 -int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 +int kblockd_schedule_work(struct work_struct *work);
 +int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
 +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
  
  #ifdef CONFIG_BLK_CGROUP
  /*