/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Functions for EQs, NEQs and interrupts
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Khadija Souissi <souissi@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"

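/*
 * EQE/NEQE field masks. EHCA_BMASK_IBM() (defined in ehca_tools.h) uses
 * IBM bit numbering, i.e. bit 0 is the most significant bit of the
 * 64-bit entry, so e.g. EQE_CQ_TOKEN covers the low-order 32 bits.
 */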
#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM(1, 1)
#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM(8, 31)
#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM(2, 7)
#define EQE_CQ_NUMBER          EHCA_BMASK_IBM(8, 31)
#define EQE_QP_NUMBER          EHCA_BMASK_IBM(8, 31)
#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)

#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM(1, 1)
#define NEQE_EVENT_CODE        EHCA_BMASK_IBM(2, 7)
#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM(8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)

#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE        EHCA_BMASK_IBM(0, 7)

static void queue_comp_task(struct ehca_cq *__cq);

static struct ehca_comp_pool *pool;
#ifdef CONFIG_HOTPLUG_CPU
static struct notifier_block comp_pool_callback_nb;
#endif

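/*
 * Invoke the consumer's completion handler for this CQ; cq->cb_lock
 * serializes invocations of the handler.
 */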
static inline void comp_event_callback(struct ehca_cq *cq)
{
        if (!cq->ib_cq.comp_handler)
                return;

        spin_lock(&cq->cb_lock);
        cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
        spin_unlock(&cq->cb_lock);
}

static void print_error_data(struct ehca_shca *shca, void *data,
                             u64 *rblock, int length)
{
        u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
        u64 resource = rblock[1];

        switch (type) {
        case 0x1: /* Queue Pair */
        {
                struct ehca_qp *qp = (struct ehca_qp *)data;

                /* only print error data if AER is set */
                if (rblock[6] == 0)
                        return;

                ehca_err(&shca->ib_device,
                         "QP 0x%x (resource=%lx) has errors.",
                         qp->ib_qp.qp_num, resource);
                break;
        }
        case 0x4: /* Completion Queue */
        {
                struct ehca_cq *cq = (struct ehca_cq *)data;

                ehca_err(&shca->ib_device,
                         "CQ 0x%x (resource=%lx) has errors.",
                         cq->cq_number, resource);
                break;
        }
        default:
                ehca_err(&shca->ib_device,
                         "Unknown error type: %lx on %s.",
                         type, shca->ib_device.name);
                break;
        }

        ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
        ehca_err(&shca->ib_device, "EHCA ----- error data begin "
                 "---------------------------------------------------");
        ehca_dmp(rblock, length, "resource=%lx", resource);
        ehca_err(&shca->ib_device, "EHCA ----- error data end "
                 "----------------------------------------------------");
}

int ehca_error_data(struct ehca_shca *shca, void *data,
                    u64 resource)
{
        unsigned long ret;
        u64 *rblock;
        unsigned long block_count;

        rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
        if (!rblock) {
                ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
                ret = -ENOMEM;
                goto error_data1;
        }

        /* rblock must be 4K aligned and should be 4K large */
        ret = hipz_h_error_data(shca->ipz_hca_handle, resource,
                                rblock, &block_count);

        if (ret == H_R_STATE)
                ehca_err(&shca->ib_device,
                         "No error data is available: %lx.", resource);
        else if (ret == H_SUCCESS) {
                int length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);

                if (length > EHCA_PAGESIZE)
                        length = EHCA_PAGESIZE;
                print_error_data(shca, data, rblock, length);
        } else
                ehca_err(&shca->ib_device,
                         "Error data could not be fetched: %lx", resource);

        ehca_free_fw_ctrlblock(rblock);
error_data1:
        return ret;
}

static void qp_event_callback(struct ehca_shca *shca,
                              u64 eqe,
                              enum ib_event_type event_type)
{
        struct ib_event event;
        struct ehca_qp *qp;
        u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);

        read_lock(&ehca_qp_idr_lock);
        qp = idr_find(&ehca_qp_idr, token);
        read_unlock(&ehca_qp_idr_lock);
        if (!qp)
                return;

        ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);

        if (!qp->ib_qp.event_handler)
                return;

        event.device     = &shca->ib_device;
        event.event      = event_type;
        event.element.qp = &qp->ib_qp;

        qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
}

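/*
 * cq->nr_events counts events in flight for this CQ. It is incremented
 * while ehca_cq_idr_lock is held, so the CQ cannot disappear while an
 * event is being handled; the wake_up() on cq->wait_completion lets the
 * destroy path wait until the count drops to zero.
 */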
static void cq_event_callback(struct ehca_shca *shca,
                              u64 eqe)
{
        struct ehca_cq *cq;
        u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

        read_lock(&ehca_cq_idr_lock);
        cq = idr_find(&ehca_cq_idr, token);
        if (cq)
                atomic_inc(&cq->nr_events);
        read_unlock(&ehca_cq_idr_lock);
        if (!cq)
                return;

        ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

        if (atomic_dec_and_test(&cq->nr_events))
                wake_up(&cq->wait_completion);
}

static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
        u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);

        switch (identifier) {
        case 0x02: /* path migrated */
                qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
                break;
        case 0x03: /* communication established */
                qp_event_callback(shca, eqe, IB_EVENT_COMM_EST);
                break;
        case 0x04: /* send queue drained */
                qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED);
                break;
        case 0x05: /* QP error */
        case 0x06: /* QP error */
                qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL);
                break;
        case 0x07: /* CQ error */
        case 0x08: /* CQ error */
                cq_event_callback(shca, eqe);
                break;
        case 0x09: /* MRMWPTE error */
                ehca_err(&shca->ib_device, "MRMWPTE error.");
                break;
        case 0x0A: /* port event */
                ehca_err(&shca->ib_device, "Port event.");
                break;
        case 0x0B: /* MR access error */
                ehca_err(&shca->ib_device, "MR access error.");
                break;
        case 0x0C: /* EQ error */
                ehca_err(&shca->ib_device, "EQ error.");
                break;
        case 0x0D: /* P/Q_Key mismatch */
                ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
                break;
        case 0x10: /* sampling complete */
                ehca_err(&shca->ib_device, "Sampling complete.");
                break;
        case 0x11: /* unaffiliated access error */
                ehca_err(&shca->ib_device, "Unaffiliated access error.");
                break;
        case 0x12: /* path migrating error */
                ehca_err(&shca->ib_device, "Path migration error.");
                break;
        case 0x13: /* interface trace stopped */
                ehca_err(&shca->ib_device, "Interface trace stopped.");
                break;
        case 0x14: /* first error capture info available */
        default:
                ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
                         identifier, shca->ib_device.name);
                break;
        }
}

static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
        struct ib_event event;
        u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);

        switch (ec) {
        case 0x30: /* port availability change */
                if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
                        ehca_info(&shca->ib_device,
                                  "port %x is active.", port);
                        event.device = &shca->ib_device;
                        event.event = IB_EVENT_PORT_ACTIVE;
                        event.element.port_num = port;
                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
                        ib_dispatch_event(&event);
                } else {
                        ehca_info(&shca->ib_device,
                                  "port %x is inactive.", port);
                        event.device = &shca->ib_device;
                        event.event = IB_EVENT_PORT_ERR;
                        event.element.port_num = port;
                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
                        ib_dispatch_event(&event);
                }
                break;
        case 0x31:
                /* port configuration change
                 * disruptive change is caused by
                 * LID, PKEY or SM change
                 */
                ehca_warn(&shca->ib_device,
                          "disruptive port %x configuration change", port);

                ehca_info(&shca->ib_device,
                          "port %x is inactive.", port);
                event.device = &shca->ib_device;
                event.event = IB_EVENT_PORT_ERR;
                event.element.port_num = port;
                shca->sport[port - 1].port_state = IB_PORT_DOWN;
                ib_dispatch_event(&event);

                ehca_info(&shca->ib_device,
                          "port %x is active.", port);
                event.device = &shca->ib_device;
                event.event = IB_EVENT_PORT_ACTIVE;
                event.element.port_num = port;
                shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
                ib_dispatch_event(&event);
                break;
        case 0x32: /* adapter malfunction */
                ehca_err(&shca->ib_device, "Adapter malfunction.");
                break;
        case 0x33: /* trace stopped */
                ehca_err(&shca->ib_device, "Trace stopped.");
                break;
        default:
                ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
                         ec, shca->ib_device.name);
                break;
        }
}

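/*
 * Clear the CQ's event pending bit (CQx_EP) through its register page
 * so the adapter may raise further events; the load that follows is
 * presumably a read-back to ensure the store has reached the adapter.
 */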
static inline void reset_eq_pending(struct ehca_cq *cq)
{
        u64 CQx_EP;
        struct h_galpa gal = cq->galpas.kernel;

        hipz_galpa_store_cq(gal, cqx_ep, 0x0);
        CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
}

irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
        struct ehca_shca *shca = (struct ehca_shca *)dev_id;

        tasklet_hi_schedule(&shca->neq.interrupt_task);
        return IRQ_HANDLED;
}

void ehca_tasklet_neq(unsigned long data)
{
        struct ehca_shca *shca = (struct ehca_shca *)data;
        struct ehca_eqe *eqe;
        u64 ret;

        eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);

        while (eqe) {
                if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
                        parse_ec(shca, eqe->entry);

                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
        }

        ret = hipz_h_reset_event(shca->ipz_hca_handle,
                                 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);

        if (ret != H_SUCCESS)
                ehca_err(&shca->ib_device, "Can't clear notification events.");
}

irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
        struct ehca_shca *shca = (struct ehca_shca *)dev_id;

        tasklet_hi_schedule(&shca->eq.interrupt_task);
        return IRQ_HANDLED;
}

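/*
 * Handle a single EQE: a completion event is resolved to its CQ via the
 * token in the EQE and either queued to the per-CPU completion pool
 * (scaling code) or handled directly; all other events are forwarded to
 * parse_identifier().
 */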
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
        u64 eqe_value;
        u32 token;
        struct ehca_cq *cq;

        eqe_value = eqe->entry;
        ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
        if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                ehca_dbg(&shca->ib_device, "Got completion event");
                token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                read_lock(&ehca_cq_idr_lock);
                cq = idr_find(&ehca_cq_idr, token);
                if (cq)
                        atomic_inc(&cq->nr_events);
                read_unlock(&ehca_cq_idr_lock);
                if (cq == NULL) {
                        ehca_err(&shca->ib_device,
                                 "Invalid eqe for non-existing cq token=%x",
                                 token);
                        return;
                }
                reset_eq_pending(cq);
                if (ehca_scaling_code)
                        queue_comp_task(cq);
                else {
                        comp_event_callback(cq);
                        if (atomic_dec_and_test(&cq->nr_events))
                                wake_up(&cq->wait_completion);
                }
        } else {
                ehca_dbg(&shca->ib_device, "Got non completion event");
                parse_identifier(shca, eqe_value);
        }
}

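/*
 * Drain the event queue in two phases: first cache up to
 * EHCA_EQE_CACHE_SIZE EQEs and re-arm their CQs while holding
 * irq_spinlock, then run the completion callbacks from the cache.
 * is_irq distinguishes tasklet invocation from the "deadman" poll
 * (is_irq == 0).
 */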
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
        struct ehca_eq *eq = &shca->eq;
        struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
        u64 eqe_value;
        unsigned long flags;
        int eqe_cnt, i;
        int eq_empty = 0;

        spin_lock_irqsave(&eq->irq_spinlock, flags);
        if (is_irq) {
                const int max_query_cnt = 100;
                int query_cnt = 0;
                int int_state = 1;
                do {
                        int_state = hipz_h_query_int_state(
                                shca->ipz_hca_handle, eq->ist);
                        query_cnt++;
                        iosync();
                } while (int_state && query_cnt < max_query_cnt);
                if (unlikely(query_cnt == max_query_cnt))
                        ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
                                 int_state, query_cnt);
        }

        /* read out all eqes */
        eqe_cnt = 0;
        do {
                u32 token;
                eqe_cache[eqe_cnt].eqe =
                        (struct ehca_eqe *)ehca_poll_eq(shca, eq);
                if (!eqe_cache[eqe_cnt].eqe)
                        break;
                eqe_value = eqe_cache[eqe_cnt].eqe->entry;
                if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
                        token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
                        read_lock(&ehca_cq_idr_lock);
                        eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
                        if (eqe_cache[eqe_cnt].cq)
                                atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
                        read_unlock(&ehca_cq_idr_lock);
                        if (!eqe_cache[eqe_cnt].cq) {
                                ehca_err(&shca->ib_device,
                                         "Invalid eqe for non-existing cq "
                                         "token=%x", token);
                                continue;
                        }
                } else
                        eqe_cache[eqe_cnt].cq = NULL;
                eqe_cnt++;
        } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
        if (!eqe_cnt) {
                if (is_irq)
                        ehca_dbg(&shca->ib_device,
                                 "No eqe found for irq event");
                goto unlock_irq_spinlock;
        } else if (!is_irq)
                ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
        if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
                ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
        /* enable irq for new packets */
        for (i = 0; i < eqe_cnt; i++) {
                if (eq->eqe_cache[i].cq)
                        reset_eq_pending(eq->eqe_cache[i].cq);
        }
        /* check eq */
        spin_lock(&eq->spinlock);
        eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
        spin_unlock(&eq->spinlock);
        /* call completion handler for cached eqes */
        for (i = 0; i < eqe_cnt; i++)
                if (eq->eqe_cache[i].cq) {
                        if (ehca_scaling_code)
                                queue_comp_task(eq->eqe_cache[i].cq);
                        else {
                                struct ehca_cq *cq = eq->eqe_cache[i].cq;
                                comp_event_callback(cq);
                                if (atomic_dec_and_test(&cq->nr_events))
                                        wake_up(&cq->wait_completion);
                        }
                } else {
                        ehca_dbg(&shca->ib_device, "Got non completion event");
                        parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
                }
        /* poll eq if not empty */
        if (eq_empty)
                goto unlock_irq_spinlock;
        do {
                struct ehca_eqe *eqe;
                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
                if (!eqe)
                        break;
                process_eqe(shca, eqe);
        } while (1);

unlock_irq_spinlock:
        spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}

void ehca_tasklet_eq(unsigned long data)
{
        ehca_process_eq((struct ehca_shca *)data, 1);
}

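/*
 * Pick a CPU for completion processing by cycling round-robin through
 * the online CPUs, starting after the CPU chosen last time.
 */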
static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
{
        int cpu;
        unsigned long flags;

        WARN_ON_ONCE(!in_interrupt());
        if (ehca_debug_level)
                ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

        spin_lock_irqsave(&pool->last_cpu_lock, flags);
        cpu = next_cpu(pool->last_cpu, cpu_online_map);
        if (cpu == NR_CPUS)
                cpu = first_cpu(cpu_online_map);
        pool->last_cpu = cpu;
        spin_unlock_irqrestore(&pool->last_cpu_lock, flags);

        return cpu;
}

static void __queue_comp_task(struct ehca_cq *__cq,
                              struct ehca_cpu_comp_task *cct)
{
        unsigned long flags;

        spin_lock_irqsave(&cct->task_lock, flags);
        spin_lock(&__cq->task_lock);

        if (__cq->nr_callbacks == 0) {
                __cq->nr_callbacks++;
                list_add_tail(&__cq->entry, &cct->cq_list);
                cct->cq_jobs++;
                wake_up(&cct->wait_queue);
        } else
                __cq->nr_callbacks++;

        spin_unlock(&__cq->task_lock);
        spin_unlock_irqrestore(&cct->task_lock, flags);
}

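/*
 * Queue a CQ to a completion thread; if the chosen CPU already has
 * jobs pending, advance to the next online CPU once more to spread
 * the load.
 */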
static void queue_comp_task(struct ehca_cq *__cq)
{
        int cpu_id;
        struct ehca_cpu_comp_task *cct;
        int cq_jobs;
        unsigned long flags;

        cpu_id = find_next_online_cpu(pool);
        BUG_ON(!cpu_online(cpu_id));

        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
        BUG_ON(!cct);

        spin_lock_irqsave(&cct->task_lock, flags);
        cq_jobs = cct->cq_jobs;
        spin_unlock_irqrestore(&cct->task_lock, flags);
        if (cq_jobs > 0) {
                cpu_id = find_next_online_cpu(pool);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
                BUG_ON(!cct);
        }

        __queue_comp_task(__cq, cct);
}

static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
        struct ehca_cq *cq;
        unsigned long flags;

        spin_lock_irqsave(&cct->task_lock, flags);

        while (!list_empty(&cct->cq_list)) {
                cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
                spin_unlock_irqrestore(&cct->task_lock, flags);

                comp_event_callback(cq);
                if (atomic_dec_and_test(&cq->nr_events))
                        wake_up(&cq->wait_completion);

                spin_lock_irqsave(&cct->task_lock, flags);
                spin_lock(&cq->task_lock);
                cq->nr_callbacks--;
                if (!cq->nr_callbacks) {
                        list_del_init(cct->cq_list.next);
                        cct->cq_jobs--;
                }
                spin_unlock(&cq->task_lock);
        }

        spin_unlock_irqrestore(&cct->task_lock, flags);
}

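/*
 * Per-CPU completion thread: sleeps on cct->wait_queue until
 * __queue_comp_task() adds a CQ to cq_list, then drains the list via
 * run_comp_task().
 */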
static int comp_task(void *__cct)
{
        struct ehca_cpu_comp_task *cct = __cct;
        int cql_empty;
        DECLARE_WAITQUEUE(wait, current);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cct->wait_queue, &wait);

                spin_lock_irq(&cct->task_lock);
                cql_empty = list_empty(&cct->cq_list);
                spin_unlock_irq(&cct->task_lock);
                if (cql_empty)
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);

                remove_wait_queue(&cct->wait_queue, &wait);

                spin_lock_irq(&cct->task_lock);
                cql_empty = list_empty(&cct->cq_list);
                spin_unlock_irq(&cct->task_lock);
                if (!cql_empty)
                        run_comp_task(__cct);

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}

static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
                                            int cpu)
{
        struct ehca_cpu_comp_task *cct;

        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
        spin_lock_init(&cct->task_lock);
        INIT_LIST_HEAD(&cct->cq_list);
        init_waitqueue_head(&cct->wait_queue);
        cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);

        return cct->task;
}

static void destroy_comp_task(struct ehca_comp_pool *pool,
                              int cpu)
{
        struct ehca_cpu_comp_task *cct;
        struct task_struct *task;
        unsigned long flags_cct;

        cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);

        spin_lock_irqsave(&cct->task_lock, flags_cct);
        task = cct->task;
        cct->task = NULL;
        cct->cq_jobs = 0;
        spin_unlock_irqrestore(&cct->task_lock, flags_cct);

        if (task)
                kthread_stop(task);
}

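/*
 * CPU hotplug support: when a CPU dies, its completion thread is torn
 * down and CQs still queued on it are re-queued on the CPU running the
 * notifier (take_over_work() below).
 */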
#ifdef CONFIG_HOTPLUG_CPU
static void take_over_work(struct ehca_comp_pool *pool,
                           int cpu)
{
        struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
        LIST_HEAD(list);
        struct ehca_cq *cq;
        unsigned long flags_cct;

        spin_lock_irqsave(&cct->task_lock, flags_cct);

        list_splice_init(&cct->cq_list, &list);

        while (!list_empty(&list)) {
                /* take entries from the spliced-off list, not cq_list,
                 * which list_splice_init() has just emptied */
                cq = list_entry(list.next, struct ehca_cq, entry);
                list_del(&cq->entry);
                __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
                                                  smp_processor_id()));
        }

        spin_unlock_irqrestore(&cct->task_lock, flags_cct);
}

static int comp_pool_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct ehca_cpu_comp_task *cct;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
                if (!create_comp_task(pool, cpu)) {
                        ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
                        return NOTIFY_BAD;
                }
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
                kthread_bind(cct->task, any_online_cpu(cpu_online_map));
                destroy_comp_task(pool, cpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
                cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
                kthread_bind(cct->task, cpu);
                wake_up_process(cct->task);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
                destroy_comp_task(pool, cpu);
                take_over_work(pool, cpu);
                break;
        }

        return NOTIFY_OK;
}
#endif

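/*
 * Create one completion thread per online CPU and register the CPU
 * hotplug notifier. This is only used when the ehca_scaling_code
 * module parameter is set; otherwise completions are handled directly
 * in the EQ tasklet.
 */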
int ehca_create_comp_pool(void)
{
        int cpu;
        struct task_struct *task;

        if (!ehca_scaling_code)
                return 0;

        pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
        if (pool == NULL)
                return -ENOMEM;

        spin_lock_init(&pool->last_cpu_lock);
        pool->last_cpu = any_online_cpu(cpu_online_map);
        pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
        if (pool->cpu_comp_tasks == NULL) {
                kfree(pool);
                return -EINVAL;
        }

        for_each_online_cpu(cpu) {
                task = create_comp_task(pool, cpu);
                if (task) {
                        kthread_bind(task, cpu);
                        wake_up_process(task);
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        comp_pool_callback_nb.notifier_call = comp_pool_callback;
        comp_pool_callback_nb.priority = 0;
        register_cpu_notifier(&comp_pool_callback_nb);
#endif

        printk(KERN_INFO "eHCA scaling code enabled\n");
        return 0;
}

void ehca_destroy_comp_pool(void)
{
        int i;

        if (!ehca_scaling_code)
                return;

#ifdef CONFIG_HOTPLUG_CPU
        unregister_cpu_notifier(&comp_pool_callback_nb);
#endif

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i))
                        destroy_comp_task(pool, i);
        }
        free_percpu(pool->cpu_comp_tasks);
        kfree(pool);
}