Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block
author		Linus Torvalds <torvalds@linux-foundation.org>
		Sun, 15 Jan 2012 20:24:45 +0000 (12:24 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Sun, 15 Jan 2012 20:24:45 +0000 (12:24 -0800)
* 'for-3.3/core' of git://git.kernel.dk/linux-block: (37 commits)
  Revert "block: recursive merge requests"
  block: Stop using macro stubs for the bio data integrity calls
  blockdev: convert some macros to static inlines
  fs: remove unneeded plug in mpage_readpages()
  block: Add BLKROTATIONAL ioctl
  block: Introduce blk_set_stacking_limits function
  block: remove WARN_ON_ONCE() in exit_io_context()
  block: an exiting task should be allowed to create io_context
  block: ioc_cgroup_changed() needs to be exported
  block: recursive merge requests
  block, cfq: fix empty queue crash caused by request merge
  block, cfq: move icq creation and rq->elv.icq association to block core
  block, cfq: restructure io_cq creation path for io_context interface cleanup
  block, cfq: move io_cq exit/release to blk-ioc.c
  block, cfq: move icq cache management to block core
  block, cfq: move io_cq lookup to blk-ioc.c
  block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
  block, cfq: reorganize cfq_io_context into generic and cfq specific parts
  block: remove elevator_queue->ops
  block: reorder elevator switch sequence
  ...

Fix up conflicts in:
 - block/blk-cgroup.c
   switch from can_attach_task to can_attach (sketched below)
 - block/cfq-iosched.c
   conflict with the now-removed cic index changes (we now use q->id instead)
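
For context, a minimal sketch of what that conversion means for the attach
hook, assembled from the resolved hunk further below.  The function name is
illustrative; cgroup_taskset_for_each() and get_task_io_context() are the
interfaces actually visible in the diff:

	/* Old hook: void (*attach_task)(struct cgroup *, struct task_struct *),
	 * invoked once per task.  New hook: invoked once per migration and
	 * handed the whole task set to walk. */
	static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
				   struct cgroup_taskset *tset)
	{
		struct task_struct *task;
		struct io_context *ioc;

		cgroup_taskset_for_each(task, cgrp, tset) {
			/* ioc allocation failure is harmless here */
			ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
			if (ioc) {
				ioc_cgroup_changed(ioc);
				put_io_context(ioc, NULL);
			}
		}
	}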

block/blk-cgroup.c
block/blk-core.c
block/bsg.c
block/genhd.c
block/ioctl.c
drivers/md/md.c
include/linux/blkdev.h
include/linux/fs.h
kernel/fork.c

@@@ -1648,18 -1641,15 +1648,19 @@@ static int blkiocg_can_attach(struct cg
        return ret;
  }
  
 -static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 +static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 +                         struct cgroup_taskset *tset)
  {
 +      struct task_struct *task;
        struct io_context *ioc;
  
 -      /* we don't lose anything even if ioc allocation fails */
 -      ioc = get_task_io_context(tsk, GFP_ATOMIC, NUMA_NO_NODE);
 -      if (ioc) {
 -              ioc_cgroup_changed(ioc);
 -              put_io_context(ioc, NULL);
 +      cgroup_taskset_for_each(task, cgrp, tset) {
-               task_lock(task);
-               ioc = task->io_context;
-               if (ioc)
-                       ioc->cgroup_changed = 1;
-               task_unlock(task);
++              /* we don't lose anything even if ioc allocation fails */
++              ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++              if (ioc) {
++                      ioc_cgroup_changed(ioc);
++                      put_io_context(ioc, NULL);
++              }
        }
  }
  
@@@ -366,19 -369,23 +369,30 @@@ void blk_drain_queue(struct request_que
                if (drain_all)
                        blk_throtl_drain(q);
  
 -              __blk_run_queue(q);
 +              /*
 +               * This function might be called on a queue which failed
 +               * driver init after queue creation.  Some drivers
 +               * (e.g. fd) get unhappy in such cases.  Kick queue iff
 +               * dispatch queue has something on it.
 +               */
 +              if (!list_empty(&q->queue_head))
 +                      __blk_run_queue(q);
  
-               if (drain_all)
-                       nr_rqs = q->rq.count[0] + q->rq.count[1];
-               else
-                       nr_rqs = q->rq.elvpriv;
+               drain |= q->rq.elvpriv;
+               /*
+                * Unfortunately, requests are queued at and tracked from
+                * multiple places and there's no single counter which can
+                * be drained.  Check all the queues and counters.
+                */
+               if (drain_all) {
+                       drain |= !list_empty(&q->queue_head);
+                       for (i = 0; i < 2; i++) {
+                               drain |= q->rq.count[i];
+                               drain |= q->in_flight[i];
+                               drain |= !list_empty(&q->flush_queue[i]);
+                       }
+               }
  
                spin_unlock_irq(q->queue_lock);
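
The comments in the hunk above explain that requests are tracked in several
places with no single drainable counter.  As a hedged sketch, the test the
merged code builds up reduces to the following (names are from the hunk; the
enclosing retry loop and queue-lock handling are elided):

	/* OR together every place a request can still live.  Without
	 * drain_all, only requests carrying elevator-private data
	 * (q->rq.elvpriv) are waited for. */
	bool drain = false;
	int i;

	drain |= q->rq.elvpriv;
	if (drain_all) {
		drain |= !list_empty(&q->queue_head);
		for (i = 0; i < 2; i++) {	/* both indices of each pair */
			drain |= q->rq.count[i];
			drain |= q->in_flight[i];
			drain |= !list_empty(&q->flush_queue[i]);
		}
	}
	if (!drain)
		break;	/* nothing left anywhere, queue is drained */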
  
@@@ -474,18 -485,13 +492,14 @@@ struct request_queue *blk_alloc_queue_n
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
 +      q->node = node_id;
  
        err = bdi_init(&q->backing_dev_info);
-       if (err) {
-               kmem_cache_free(blk_requestq_cachep, q);
-               return NULL;
-       }
+       if (err)
+               goto fail_id;
  
-       if (blk_throtl_init(q)) {
-               kmem_cache_free(blk_requestq_cachep, q);
-               return NULL;
-       }
+       if (blk_throtl_init(q))
+               goto fail_id;
  
        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
                    laptop_mode_timer_fn, (unsigned long) q);
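
The error-path conversion in this hunk replaces two open-coded cleanups with
a shared label.  A minimal sketch of the resulting unwinding pattern; the
ida_simple_get()/ida_simple_remove() pair is an assumption about how q->id
(allocated earlier in this function, outside the hunk) is managed:

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;		/* assumed: nothing to unwind yet */

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_id;
	if (blk_throtl_init(q))
		goto fail_id;
	/* ... further initialization ... */
	return q;

fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);	/* assumed cleanup */
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;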
@@@ -603,16 -625,16 +624,16 @@@ blk_init_allocated_queue(struct request
  
        return NULL;
  }
 -EXPORT_SYMBOL(blk_init_allocated_queue_node);
 +EXPORT_SYMBOL(blk_init_allocated_queue);
  
- int blk_get_queue(struct request_queue *q)
+ bool blk_get_queue(struct request_queue *q)
  {
-       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-               kobject_get(&q->kobj);
-               return 0;
+       if (likely(!blk_queue_dead(q))) {
+               __blk_get_queue(q);
+               return true;
        }
  
-       return 1;
+       return false;
  }
  EXPORT_SYMBOL(blk_get_queue);
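
Note the flipped return convention in the hunk above: the old int version
returned 0 on success and 1 for a dead queue, while the new bool version
returns true when a reference was actually taken.  A sketch of the
caller-side idiom this implies (the surrounding code is illustrative):

	/* before: if (blk_get_queue(q)) -> failure
	 * after:  if (!blk_get_queue(q)) -> failure */
	if (!blk_get_queue(q))
		return NULL;	/* queue already marked dead, no ref taken */
	/* ... use q, then drop the reference with blk_put_queue(q) ... */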
  
diff --cc block/bsg.c
Simple merge
diff --cc block/genhd.c
Simple merge
diff --cc block/ioctl.c
Simple merge
diff --cc drivers/md/md.c
Simple merge
diff --cc include/linux/blkdev.h
Simple merge
diff --cc include/linux/fs.h
Simple merge
diff --cc kernel/fork.c
Simple merge