Pull asus into release branch
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3ec53c6..f284be1 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -66,7 +66,9 @@
 static void queue_comp_task(struct ehca_cq *__cq);
 
 static struct ehca_comp_pool* pool;
+#ifdef CONFIG_HOTPLUG_CPU
 static struct notifier_block comp_pool_callback_nb;
+#endif
 
 static inline void comp_event_callback(struct ehca_cq *cq)
 {
@@ -404,10 +406,11 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
        u32 token;
        unsigned long flags;
        struct ehca_cq *cq;
+
        eqe_value = eqe->entry;
        ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
        if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-               ehca_dbg(&shca->ib_device, "... completion event");
+               ehca_dbg(&shca->ib_device, "Got completion event");
                token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                spin_lock_irqsave(&ehca_cq_idr_lock, flags);
                cq = idr_find(&ehca_cq_idr, token);
@@ -419,16 +422,20 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
                        return;
                }
                reset_eq_pending(cq);
-               if (ehca_scaling_code) {
+               cq->nr_events++;
+               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+               if (ehca_scaling_code)
                        queue_comp_task(cq);
-                       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-               } else {
-                       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+               else {
                        comp_event_callback(cq);
+                       spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+                       cq->nr_events--;
+                       if (!cq->nr_events)
+                               wake_up(&cq->wait_completion);
+                       spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
                }
        } else {
-               ehca_dbg(&shca->ib_device,
-                        "Got non completion event");
+               ehca_dbg(&shca->ib_device, "Got non completion event");
                parse_identifier(shca, eqe_value);
        }
 }
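
This hunk is the heart of the fix: cq->nr_events counts completion events still in flight for a CQ. The counter is incremented while ehca_cq_idr_lock is still held from the idr_find() lookup, so the CQ cannot disappear between lookup and use, and it is decremented (with a wake_up() on cq->wait_completion when it reaches zero) once the handler has run. The destroy path is not part of this diff; a minimal sketch of the wait it presumably performs, built only from the fields used here (hypothetical shape, the actual code lives in ehca_cq.c):

    /* hedged sketch of the destroy-side wait -- not taken from this diff */
    spin_lock_irqsave(&ehca_cq_idr_lock, flags);
    while (my_cq->nr_events) {
            /* must not sleep under a spinlock, so drop it first */
            spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
            wait_event(my_cq->wait_completion, !my_cq->nr_events);
            spin_lock_irqsave(&ehca_cq_idr_lock, flags);
    }
    idr_remove(&ehca_cq_idr, my_cq->token); /* no new event can find the CQ */
    spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
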
@@ -478,6 +485,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                                         "token=%x", token);
                                continue;
                        }
+                       eqe_cache[eqe_cnt].cq->nr_events++;
                        spin_unlock(&ehca_cq_idr_lock);
                } else
                        eqe_cache[eqe_cnt].cq = NULL;
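
The EQE-cache path follows the same rule: the reference is taken before the lookup lock is released. The invariant both event paths now share, in shape:

    /* lookup and pin must form one critical section */
    spin_lock(&ehca_cq_idr_lock);
    cq = idr_find(&ehca_cq_idr, token);
    if (cq)
            cq->nr_events++;        /* pin the CQ before dropping the lock */
    spin_unlock(&ehca_cq_idr_lock);

Incrementing after the unlock would leave a window in which the destroy path sees nr_events == 0 and frees the CQ while this path still holds the pointer.
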
@@ -504,12 +512,18 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
        /* call completion handler for cached eqes */
        for (i = 0; i < eqe_cnt; i++)
                if (eq->eqe_cache[i].cq) {
-                       if (ehca_scaling_code) {
-                               spin_lock(&ehca_cq_idr_lock);
+                       if (ehca_scaling_code)
                                queue_comp_task(eq->eqe_cache[i].cq);
-                               spin_unlock(&ehca_cq_idr_lock);
-                       } else
-                               comp_event_callback(eq->eqe_cache[i].cq);
+                       else {
+                               struct ehca_cq *cq = eq->eqe_cache[i].cq;
+                               comp_event_callback(cq);
+                               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+                               cq->nr_events--;
+                               if (!cq->nr_events)
+                                       wake_up(&cq->wait_completion);
+                               spin_unlock_irqrestore(&ehca_cq_idr_lock,
+                                                      flags);
+                       }
                } else {
                        ehca_dbg(&shca->ib_device, "Got non completion event");
                        parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
@@ -523,7 +537,6 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                if (!eqe)
                        break;
                process_eqe(shca, eqe);
-               eqe_cnt++;
        } while (1);
 
 unlock_irq_spinlock:
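
The dropped eqe_cnt++ was apparently leftover: eqe_cnt indexes eqe_cache[] only during the cache-filling pass, and this final loop hands each remaining EQE straight to process_eqe() without touching the cache. Outline of ehca_process_eq() after the patch (comments only, condensed):

    /*
     * 1. fill eqe_cache[0..eqe_cnt) from the EQ, pinning each CQ via
     *    nr_events++ under ehca_cq_idr_lock
     * 2. run the handlers for the cached entries, unpinning each CQ
     *    (and waking wait_completion) as its handler finishes
     * 3. drain EQEs that arrived meanwhile through process_eqe();
     *    the cache is no longer involved, so eqe_cnt stays untouched
     */
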
@@ -567,8 +580,7 @@ static void __queue_comp_task(struct ehca_cq *__cq,
                list_add_tail(&__cq->entry, &cct->cq_list);
                cct->cq_jobs++;
                wake_up(&cct->wait_queue);
-       }
-       else
+       } else
                __cq->nr_callbacks++;
 
        spin_unlock(&__cq->task_lock);
@@ -577,18 +589,21 @@ static void __queue_comp_task(struct ehca_cq *__cq,
 
 static void queue_comp_task(struct ehca_cq *__cq)
 {
-       int cpu;
        int cpu_id;
        struct ehca_cpu_comp_task *cct;
+       int cq_jobs;
+       unsigned long flags;
 
-       cpu = get_cpu();
        cpu_id = find_next_online_cpu(pool);
        BUG_ON(!cpu_online(cpu_id));
 
        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
        BUG_ON(!cct);
 
-       if (cct->cq_jobs > 0) {
+       spin_lock_irqsave(&cct->task_lock, flags);
+       cq_jobs = cct->cq_jobs;
+       spin_unlock_irqrestore(&cct->task_lock, flags);
+       if (cq_jobs > 0) {
                cpu_id = find_next_online_cpu(pool);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
                BUG_ON(!cct);
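
Two changes in queue_comp_task(): the cq_jobs check is now a snapshot taken under cct->task_lock, consistent with the list manipulation in __queue_comp_task() rather than a bare racy read, and the get_cpu() call goes away. get_cpu() pins the caller to its current CPU by disabling preemption, which buys nothing here because the target CPU comes from find_next_online_cpu(), not from the caller. For contrast, the usual pairing (generic illustration with hypothetical names, not this driver's code):

    /* get_cpu()/put_cpu() only pay off when the caller's own CPU is the
     * one whose per-CPU data is used: */
    int cpu = get_cpu();                        /* disables preemption */
    struct my_pcpu *p = per_cpu_ptr(ptr, cpu);  /* stable: no migration */
    do_work(p);                                 /* hypothetical helper */
    put_cpu();                                  /* re-enables preemption */
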
@@ -608,11 +623,17 @@ static void run_comp_task(struct ehca_cpu_comp_task* cct)
                cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
                spin_unlock_irqrestore(&cct->task_lock, flags);
                comp_event_callback(cq);
-               spin_lock_irqsave(&cct->task_lock, flags);
 
+               spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+               cq->nr_events--;
+               if (!cq->nr_events)
+                       wake_up(&cq->wait_completion);
+               spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+
+               spin_lock_irqsave(&cct->task_lock, flags);
                spin_lock(&cq->task_lock);
                cq->nr_callbacks--;
-               if (cq->nr_callbacks == 0) {
+               if (!cq->nr_callbacks) {
                        list_del_init(cct->cq_list.next);
                        cct->cq_jobs--;
                }
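
Note the ordering run_comp_task() now uses: the completion callback runs with no spinlocks held; nr_events is then dropped under ehca_cq_idr_lock, the same lock the destroy side checks it under; only afterwards are the comp-task bookkeeping locks retaken. In outline:

    /*
     * comp_event_callback(cq);         no locks held across the callback
     *
     * lock(ehca_cq_idr_lock)           counter shared with destroy path
     *         cq->nr_events--;  wake_up() when it reaches zero
     * unlock(ehca_cq_idr_lock)
     *
     * lock(cct->task_lock)             per-CPU work-queue bookkeeping
     *         lock(cq->task_lock)
     *                 cq->nr_callbacks--;  dequeue when it reaches zero
     *         unlock(cq->task_lock)
     * unlock(cct->task_lock)
     */

Decrementing only after the callback returns is what guarantees the destroy path that no handler can still be running against the CQ.
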
@@ -714,6 +735,7 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int comp_pool_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
@@ -756,6 +778,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
+#endif
 
 int ehca_create_comp_pool(void)
 {
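
Without CONFIG_HOTPLUG_CPU no CPU can come or go after boot, so comp_pool_callback() can never usefully fire; compiling it out, together with its notifier_block (first hunk of this diff) and the register/unregister calls (next two hunks), avoids dead code and a defined-but-unused warning. For reference, the shape of a CPU-hotplug notifier of this kernel era (illustrative skeleton showing only a few of the possible actions, not this driver's cases):

    #ifdef CONFIG_HOTPLUG_CPU
    static int example_cpu_notify(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_UP_PREPARE:
                    /* set up per-CPU state before the CPU starts running */
                    break;
            case CPU_UP_CANCELED:
            case CPU_DEAD:
                    /* tear down, or migrate queued work off the dead CPU */
                    break;
            }
            return NOTIFY_OK;
    }
    #endif
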
@@ -786,9 +809,11 @@ int ehca_create_comp_pool(void)
                }
        }
 
+#ifdef CONFIG_HOTPLUG_CPU
        comp_pool_callback_nb.notifier_call = comp_pool_callback;
+       comp_pool_callback_nb.priority = 0;
        register_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
        printk(KERN_INFO "eHCA scaling code enabled\n");
 
@@ -802,7 +827,9 @@ void ehca_destroy_comp_pool(void)
        if (!ehca_scaling_code)
                return;
 
+#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&comp_pool_callback_nb);
+#endif
 
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i))