Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux...
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 11 Mar 2008 16:47:28 +0000 (09:47 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Tue, 11 Mar 2008 16:47:28 +0000 (09:47 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: remove quicklists
  x86: ia32 syscall restart fix
  x86: ioremap, remove WARN_ON()

MAINTAINERS
drivers/infiniband/core/cm.c
drivers/infiniband/core/fmr_pool.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/ulp/iser/iser_verbs.c
kernel/sched.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 558636e..25f450f 100644
@@ -2156,7 +2156,7 @@ L:        netdev@vger.kernel.org
 S:     Maintained
 
 IPATH DRIVER:
-P:     Arthur Jones
+P:     Ralph Campbell
 M:     infinipath@qlogic.com
 L:     general@lists.openfabrics.org
 T:     git git://git.qlogic.com/ipath-linux-2.6
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index b10ade9..4df4051 100644
@@ -3759,6 +3759,7 @@ static void cm_remove_one(struct ib_device *device)
                port = cm_dev->port[i-1];
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
+               flush_workqueue(cm.wq);
                cm_remove_port_fs(port);
        }
        kobject_put(&cm_dev->dev_obj);
@@ -3813,6 +3814,7 @@ static void __exit ib_cm_cleanup(void)
                cancel_delayed_work(&timewait_info->work.work);
        spin_unlock_irq(&cm.lock);
 
+       ib_unregister_client(&cm_client);
        destroy_workqueue(cm.wq);
 
        list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
@@ -3820,7 +3822,6 @@ static void __exit ib_cm_cleanup(void)
                kfree(timewait_info);
        }
 
-       ib_unregister_client(&cm_client);
        class_unregister(&cm_class);
        idr_destroy(&cm.local_id_table);
 }
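
Both cm.c hunks are about teardown ordering: cm_remove_one() now flushes the workqueue before removing the port sysfs entries, and ib_cm_cleanup() unregisters the client *before* destroying the workqueue, so no callback can queue work onto a queue that no longer exists. A minimal userspace sketch of the same shutdown pattern, with illustrative names and pthreads standing in for kernel workqueues:

	#include <pthread.h>
	#include <stdbool.h>

	struct workqueue {
		pthread_mutex_t lock;
		bool accepting;     /* cleared once the producer is unregistered */
		/* ... pending work items, worker thread, etc. ... */
	};

	/* Analogue of ib_unregister_client(): stop all new submissions. */
	static void unregister_client(struct workqueue *wq)
	{
		pthread_mutex_lock(&wq->lock);
		wq->accepting = false;
		pthread_mutex_unlock(&wq->lock);
	}

	/* Analogue of destroy_workqueue(): drain and free. This is safe
	 * only because unregister_client() has already run, so nothing
	 * can race a new submission against the teardown. */
	static void destroy_queue(struct workqueue *wq)
	{
		/* ... flush remaining items and join the worker here ... */
		pthread_mutex_destroy(&wq->lock);
	}
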
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7f00347..06d502c 100644
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
        int                 ret;
-       struct ib_pool_fmr *fmr, *next;
+       struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
 
@@ -158,20 +158,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #endif
        }
 
-       /*
-        * The free_list may hold FMRs that have been put there
-        * because they haven't reached the max_remap count.
-        * Invalidate their mapping as well.
-        */
-       list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
-               if (fmr->remap_count == 0)
-                       continue;
-               hlist_del_init(&fmr->cache_node);
-               fmr->remap_count = 0;
-               list_add_tail(&fmr->fmr->list, &fmr_list);
-               list_move(&fmr->list, &unmap_list);
-       }
-
        list_splice(&pool->dirty_list, &unmap_list);
        INIT_LIST_HEAD(&pool->dirty_list);
        pool->dirty_len = 0;
@@ -384,6 +370,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 
        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+               if (fmr->remap_count) {
+                       INIT_LIST_HEAD(&fmr_list);
+                       list_add_tail(&fmr->fmr->list, &fmr_list);
+                       ib_unmap_fmr(&fmr_list);
+               }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
@@ -407,8 +398,23 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-       int serial = atomic_inc_return(&pool->req_ser);
+       int serial;
+       struct ib_pool_fmr *fmr, *next;
+
+       /*
+        * The free_list holds FMRs that may have been used
+        * but have not been remapped enough times to be dirty.
+        * Put them on the dirty list now so that the cleanup
+        * thread will reap them too.
+        */
+       spin_lock_irq(&pool->pool_lock);
+       list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+               if (fmr->remap_count > 0)
+                       list_move(&fmr->list, &pool->dirty_list);
+       }
+       spin_unlock_irq(&pool->pool_lock);
 
+       serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);
 
        if (wait_event_interruptible(pool->force_wait,
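
The fmr_pool.c change moves the "used but not yet dirty" handling out of ib_fmr_batch_release() and into ib_flush_fmr_pool(), which now walks the free list under pool_lock and moves any remapped entry onto the dirty list before waking the cleanup thread. A hedged userspace sketch of that list-migration step (hand-rolled singly linked lists; the real code uses the kernel's list_move() on doubly linked lists):

	#include <pthread.h>

	struct fmr_entry {
		int remap_count;
		struct fmr_entry *next;
	};

	struct pool {
		pthread_mutex_t lock;
		struct fmr_entry *free_list;
		struct fmr_entry *dirty_list;
	};

	/* Move every used-but-not-yet-dirty entry onto the dirty list,
	 * under the pool lock, so the cleanup pass reaps them too. */
	static void pool_flush_prepare(struct pool *p)
	{
		pthread_mutex_lock(&p->lock);
		struct fmr_entry **link = &p->free_list;
		while (*link) {
			struct fmr_entry *e = *link;
			if (e->remap_count > 0) {
				*link = e->next;           /* unlink from free list */
				e->next = p->dirty_list;   /* push onto dirty list  */
				p->dirty_list = e;
			} else {
				link = &e->next;
			}
		}
		pthread_mutex_unlock(&p->lock);
	}
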
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 223b1aa..81c9195 100644
@@ -839,6 +839,7 @@ static void cm_work_handler(struct work_struct *_work)
        unsigned long flags;
        int empty;
        int ret = 0;
+       int destroy_id;
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        empty = list_empty(&cm_id_priv->work_list);
@@ -857,9 +858,9 @@ static void cm_work_handler(struct work_struct *_work)
                        destroy_cm_id(&cm_id_priv->id);
                }
                BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
+               destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
                if (iwcm_deref_id(cm_id_priv)) {
-                       if (test_bit(IWCM_F_CALLBACK_DESTROY,
-                                    &cm_id_priv->flags)) {
+                       if (destroy_id) {
                                BUG_ON(!list_empty(&cm_id_priv->work_list));
                                free_cm_id(cm_id_priv);
                        }
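
The iwcm.c change fixes a use-after-free: cm_work_handler() used to read cm_id_priv->flags after iwcm_deref_id() had potentially dropped the last reference. The fix snapshots the IWCM_F_CALLBACK_DESTROY bit while a reference is still held. The general pattern, sketched in userspace C with illustrative types:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	#define F_CALLBACK_DESTROY 0x1

	struct cm_id {
		atomic_int refcount;
		unsigned long flags;
	};

	static void cm_id_put(struct cm_id *id)
	{
		/* Snapshot any state needed after the drop while we still
		 * hold a reference; once the count can hit zero, 'id' may
		 * be freed by another path at any moment. */
		bool destroy = id->flags & F_CALLBACK_DESTROY;

		if (atomic_fetch_sub(&id->refcount, 1) == 1 && destroy)
			free(id);
	}
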
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index df1838f..b2ea921 100644
@@ -189,7 +189,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
                return ERR_PTR(-ENOMEM);
        }
        chp->rhp = rhp;
-       chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
+       chp->ibcq.cqe = 1 << chp->cq.size_log2;
        spin_lock_init(&chp->lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
@@ -819,8 +819,11 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
                kfree(qhp);
                return ERR_PTR(-ENOMEM);
        }
+
        attrs->cap.max_recv_wr = rqsize - 1;
        attrs->cap.max_send_wr = sqsize;
+       attrs->cap.max_inline_data = T3_MAX_INLINE;
+
        qhp->rhp = rhp;
        qhp->attr.pd = php->pdid;
        qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
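
The iwch_provider.c hunks make the provider report its real capabilities: the CQ now advertises the full 2^size_log2 entries in ibcq.cqe instead of one fewer, and created QPs report T3_MAX_INLINE as their inline-data limit rather than leaving the field unset. A trivial, illustrative check of the size arithmetic:

	#include <assert.h>

	static int reported_cqe(int size_log2)
	{
		return 1 << size_log2;      /* was (1 << size_log2) - 1 */
	}

	int main(void)
	{
		/* With size_log2 == 6 the provider now advertises all 64
		 * entries, so consumers sizing queues from ibcq.cqe no
		 * longer come up one short. */
		assert(reported_cqe(6) == 64);
		return 0;
	}
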
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 714b8db..993f0a8 100644
@@ -237,36 +237,32 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 static
 struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
 {
-       struct list_head    *p_list;
-       struct iser_device  *device = NULL;
+       struct iser_device *device;
 
        mutex_lock(&ig.device_list_mutex);
 
-       p_list = ig.device_list.next;
-       while (p_list != &ig.device_list) {
-               device = list_entry(p_list, struct iser_device, ig_list);
+       list_for_each_entry(device, &ig.device_list, ig_list)
                /* find if there's a match using the node GUID */
                if (device->ib_device->node_guid == cma_id->device->node_guid)
-                       break;
-       }
+                       goto inc_refcnt;
 
-       if (device == NULL) {
-               device = kzalloc(sizeof *device, GFP_KERNEL);
-               if (device == NULL)
-                       goto out;
-               /* assign this device to the device */
-               device->ib_device = cma_id->device;
-               /* init the device and link it into ig device list */
-               if (iser_create_device_ib_res(device)) {
-                       kfree(device);
-                       device = NULL;
-                       goto out;
-               }
-               list_add(&device->ig_list, &ig.device_list);
+       device = kzalloc(sizeof *device, GFP_KERNEL);
+       if (device == NULL)
+               goto out;
+
+       /* assign this device to the device */
+       device->ib_device = cma_id->device;
+       /* init the device and link it into ig device list */
+       if (iser_create_device_ib_res(device)) {
+               kfree(device);
+               device = NULL;
+               goto out;
        }
-out:
-       BUG_ON(device == NULL);
+       list_add(&device->ig_list, &ig.device_list);
+
+inc_refcnt:
        device->refcount++;
+out:
        mutex_unlock(&ig.device_list_mutex);
        return device;
 }
@@ -372,6 +368,12 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
        int    ret;
 
        device = iser_device_find_by_ib_device(cma_id);
+       if (!device) {
+               iser_err("device lookup/creation failed\n");
+               iser_connect_error(cma_id);
+               return;
+       }
+
        ib_conn = (struct iser_conn *)cma_id->context;
        ib_conn->device = device;
 
@@ -380,7 +382,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
                iser_err("resolve route failed: %d\n", ret);
                iser_connect_error(cma_id);
        }
-       return;
 }
 
 static void iser_route_handler(struct rdma_cm_id *cma_id)
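
The iser_verbs.c rewrite replaces the open-coded list walk with list_for_each_entry() and, more importantly, stops crashing on failure: iser_device_find_by_ib_device() now returns NULL instead of hitting BUG_ON(), and iser_addr_handler() checks for that and fails the connection gracefully. The lookup-or-create shape, sketched in userspace C with illustrative names:

	#include <pthread.h>
	#include <stdlib.h>

	struct device {
		unsigned long long guid;
		int refcount;
		struct device *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct device *device_list;

	static struct device *device_find_or_create(unsigned long long guid)
	{
		struct device *d;

		pthread_mutex_lock(&list_lock);
		for (d = device_list; d; d = d->next)
			if (d->guid == guid)
				goto inc_ref;

		d = calloc(1, sizeof(*d));
		if (!d)
			goto out;          /* report failure, don't crash */
		d->guid = guid;
		d->next = device_list;
		device_list = d;
	inc_ref:
		d->refcount++;
	out:
		pthread_mutex_unlock(&list_lock);
		return d;                  /* callers must check for NULL */
	}
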
diff --git a/kernel/sched.c b/kernel/sched.c
index b02e4fc..1cb53fb 100644
@@ -5813,13 +5813,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                /* Must be high prio: stop_machine expects to yield to it. */
                rq = task_rq_lock(p, &flags);
                __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
-
-               /* Update our root-domain */
-               if (rq->rd) {
-                       BUG_ON(!cpu_isset(cpu, rq->rd->span));
-                       cpu_set(cpu, rq->rd->online);
-               }
-
                task_rq_unlock(rq, &flags);
                cpu_rq(cpu)->migration_thread = p;
                break;
@@ -5828,6 +5821,15 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        case CPU_ONLINE_FROZEN:
                /* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
+
+               /* Update our root-domain */
+               rq = cpu_rq(cpu);
+               spin_lock_irqsave(&rq->lock, flags);
+               if (rq->rd) {
+                       BUG_ON(!cpu_isset(cpu, rq->rd->span));
+                       cpu_set(cpu, rq->rd->online);
+               }
+               spin_unlock_irqrestore(&rq->lock, flags);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5879,7 +5881,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                spin_unlock_irq(&rq->lock);
                break;
 
-       case CPU_DOWN_PREPARE:
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
                spin_lock_irqsave(&rq->lock, flags);
@@ -6103,6 +6106,8 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        rq->rd = rd;
 
        cpu_set(rq->cpu, rd->span);
+       if (cpu_isset(rq->cpu, cpu_online_map))
+               cpu_set(rq->cpu, rd->online);
 
        for (class = sched_class_highest; class; class = class->next) {
                if (class->join_domain)
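
The sched.c changes move the root-domain online update from CPU_UP_PREPARE to CPU_ONLINE (and from CPU_DOWN_PREPARE to CPU_DYING / CPU_DYING_FROZEN), and rq_attach_root() now marks a CPU online immediately if it already is; every update happens under the runqueue lock. The invariant being protected, in a small sketch with a pthread mutex standing in for rq->lock:

	#include <pthread.h>
	#include <stdbool.h>

	#define NR_CPUS 8

	struct root_domain {
		bool span[NR_CPUS];    /* CPUs belonging to this domain */
		bool online[NR_CPUS];  /* subset of span that is online */
	};

	struct rq {
		pthread_mutex_t lock;
		struct root_domain *rd;
	};

	/* Flip a CPU's online bit only when its state actually changes
	 * (CPU_ONLINE / CPU_DYING), and only under the runqueue lock, so
	 * readers never see a CPU marked online in a root domain it has
	 * not fully joined. */
	static void rq_set_online(struct rq *rq, int cpu, bool online)
	{
		pthread_mutex_lock(&rq->lock);
		if (rq->rd && rq->rd->span[cpu])   /* must be in the span */
			rq->rd->online[cpu] = online;
		pthread_mutex_unlock(&rq->lock);
	}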