4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
58 #ifdef CONFIG_HIGH_RES_TIMERS
59 #include <linux/hrtime.h>
60 # if defined(schedule_next_int)
61 /* Old high-res timer code, do translations. */
62 # define get_arch_cycles(a) quick_update_jiffies_sub(a)
63 # define arch_cycles_per_jiffy cycles_per_jiffies
65 static inline void add_usec_to_timer(struct timer_list *t, long v)
67 t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
68 while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
71 t->arch_cycle_expires -= arch_cycles_per_jiffy;
75 #include <linux/interrupt.h>
76 #include <linux/rcupdate.h>
77 #include <linux/ipmi_smi.h>
79 #include "ipmi_si_sm.h"
80 #include <linux/init.h>
81 #include <linux/dmi.h>
83 /* Measure times between events in the driver. */
86 /* Call every 10 ms. */
87 #define SI_TIMEOUT_TIME_USEC 10000
88 #define SI_USEC_PER_JIFFY (1000000/HZ)
89 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
90 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
98 SI_CLEARING_FLAGS_THEN_SET_IRQ,
100 SI_ENABLE_INTERRUPTS1,
101 SI_ENABLE_INTERRUPTS2
102 /* FIXME - add watchdog stuff. */
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG 2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
111 SI_KCS, SI_SMIC, SI_BT
113 static char *si_to_str[] = { "KCS", "SMIC", "BT" };
/* Response data of the IPMI Get Device ID command.  This mirrors the
   wire format defined by the IPMI specification, so it must be packed
   and the field order must not change. */
struct ipmi_device_id {
	unsigned char device_id;
	unsigned char device_revision;
	unsigned char firmware_revision_1;	/* Major firmware revision */
	unsigned char firmware_revision_2;	/* Minor firmware revision (BCD) */
	unsigned char ipmi_version;		/* BCD-ish: low nibble major, high nibble minor */
	unsigned char additional_device_support;
	unsigned char manufacturer_id[3];	/* 20-bit IANA number, little-endian */
	unsigned char product_id[2];		/* little-endian */
	unsigned char aux_firmware_revision[4];
} __attribute__((packed));
/* Per the IPMI spec, the version byte holds the major revision in the
   low nibble and the minor revision in the high nibble. */
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
134 struct si_sm_data *si_sm;
135 struct si_sm_handlers *handlers;
136 enum si_type si_type;
139 struct list_head xmit_msgs;
140 struct list_head hp_xmit_msgs;
141 struct ipmi_smi_msg *curr_msg;
142 enum si_intf_state si_state;
144 /* Used to handle the various types of I/O that can occur with
147 int (*io_setup)(struct smi_info *info);
148 void (*io_cleanup)(struct smi_info *info);
149 int (*irq_setup)(struct smi_info *info);
150 void (*irq_cleanup)(struct smi_info *info);
151 unsigned int io_size;
152 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
153 void (*addr_source_cleanup)(struct smi_info *info);
154 void *addr_source_data;
156 /* Per-OEM handler, called from handle_flags().
157 Returns 1 when handle_flags() needs to be re-run
158 or 0 indicating it set si_state itself.
160 int (*oem_data_avail_handler)(struct smi_info *smi_info);
162 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
163 is set to hold the flags until we are done handling everything
165 #define RECEIVE_MSG_AVAIL 0x01
166 #define EVENT_MSG_BUFFER_FULL 0x02
167 #define WDT_PRE_TIMEOUT_INT 0x08
168 #define OEM0_DATA_AVAIL 0x20
169 #define OEM1_DATA_AVAIL 0x40
170 #define OEM2_DATA_AVAIL 0x80
171 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
174 unsigned char msg_flags;
176 /* If set to true, this will request events the next time the
177 state machine is idle. */
180 /* If true, run the state machine to completion on every send
181 call. Generally used after a panic to make sure stuff goes
183 int run_to_completion;
185 /* The I/O port of an SI interface. */
188 /* The space between start addresses of the two ports. For
189 instance, if the first port is 0xca2 and the spacing is 4, then
190 the second port is 0xca6. */
191 unsigned int spacing;
193 /* zero if no irq; */
196 /* The timer for this si. */
197 struct timer_list si_timer;
199 /* The time (in jiffies) the last timeout occurred at. */
200 unsigned long last_timeout_jiffies;
202 /* Used to gracefully stop the timer without race conditions. */
203 atomic_t stop_operation;
205 /* The driver will disable interrupts when it gets into a
206 situation where it cannot handle messages due to lack of
207 memory. Once that situation clears up, it will re-enable
209 int interrupt_disabled;
211 struct ipmi_device_id device_id;
213 /* Slave address, could be reported from DMI. */
214 unsigned char slave_addr;
216 /* Counters and things for the proc filesystem. */
217 spinlock_t count_lock;
218 unsigned long short_timeouts;
219 unsigned long long_timeouts;
220 unsigned long timeout_restarts;
222 unsigned long interrupts;
223 unsigned long attentions;
224 unsigned long flag_fetches;
225 unsigned long hosed_count;
226 unsigned long complete_transactions;
227 unsigned long events;
228 unsigned long watchdog_pretimeouts;
229 unsigned long incoming_messages;
231 struct task_struct *thread;
233 struct list_head link;
236 static int try_smi_init(struct smi_info *smi);
238 static struct notifier_block *xaction_notifier_list;
239 static int register_xaction_notifier(struct notifier_block * nb)
241 return notifier_chain_register(&xaction_notifier_list, nb);
244 static void si_restart_short_timer(struct smi_info *smi_info);
246 static void deliver_recv_msg(struct smi_info *smi_info,
247 struct ipmi_smi_msg *msg)
249 /* Deliver the message to the upper layer with the lock
251 spin_unlock(&(smi_info->si_lock));
252 ipmi_smi_msg_received(smi_info->intf, msg);
253 spin_lock(&(smi_info->si_lock));
256 static void return_hosed_msg(struct smi_info *smi_info)
258 struct ipmi_smi_msg *msg = smi_info->curr_msg;
260 /* Make it a reponse */
261 msg->rsp[0] = msg->data[0] | 4;
262 msg->rsp[1] = msg->data[1];
263 msg->rsp[2] = 0xFF; /* Unknown error. */
266 smi_info->curr_msg = NULL;
267 deliver_recv_msg(smi_info, msg);
270 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
273 struct list_head *entry = NULL;
278 /* No need to save flags, we aleady have interrupts off and we
279 already hold the SMI lock. */
280 spin_lock(&(smi_info->msg_lock));
282 /* Pick the high priority queue first. */
283 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
284 entry = smi_info->hp_xmit_msgs.next;
285 } else if (!list_empty(&(smi_info->xmit_msgs))) {
286 entry = smi_info->xmit_msgs.next;
290 smi_info->curr_msg = NULL;
296 smi_info->curr_msg = list_entry(entry,
301 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
303 err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
304 if (err & NOTIFY_STOP_MASK) {
305 rv = SI_SM_CALL_WITHOUT_DELAY;
308 err = smi_info->handlers->start_transaction(
310 smi_info->curr_msg->data,
311 smi_info->curr_msg->data_size);
313 return_hosed_msg(smi_info);
316 rv = SI_SM_CALL_WITHOUT_DELAY;
319 spin_unlock(&(smi_info->msg_lock));
324 static void start_enable_irq(struct smi_info *smi_info)
326 unsigned char msg[2];
328 /* If we are enabling interrupts, we have to tell the
330 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
331 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
333 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
334 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
337 static void start_clear_flags(struct smi_info *smi_info)
339 unsigned char msg[3];
341 /* Make sure the watchdog pre-timeout flag is not set at startup. */
342 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
343 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
344 msg[2] = WDT_PRE_TIMEOUT_INT;
346 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
347 smi_info->si_state = SI_CLEARING_FLAGS;
350 /* When we have a situtaion where we run out of memory and cannot
351 allocate messages, we just leave them in the BMC and run the system
352 polled until we can allocate some memory. Once we have some
353 memory, we will re-enable the interrupt. */
354 static inline void disable_si_irq(struct smi_info *smi_info)
356 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
357 disable_irq_nosync(smi_info->irq);
358 smi_info->interrupt_disabled = 1;
362 static inline void enable_si_irq(struct smi_info *smi_info)
364 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
365 enable_irq(smi_info->irq);
366 smi_info->interrupt_disabled = 0;
370 static void handle_flags(struct smi_info *smi_info)
373 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
374 /* Watchdog pre-timeout */
375 spin_lock(&smi_info->count_lock);
376 smi_info->watchdog_pretimeouts++;
377 spin_unlock(&smi_info->count_lock);
379 start_clear_flags(smi_info);
380 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
381 spin_unlock(&(smi_info->si_lock));
382 ipmi_smi_watchdog_pretimeout(smi_info->intf);
383 spin_lock(&(smi_info->si_lock));
384 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
385 /* Messages available. */
386 smi_info->curr_msg = ipmi_alloc_smi_msg();
387 if (!smi_info->curr_msg) {
388 disable_si_irq(smi_info);
389 smi_info->si_state = SI_NORMAL;
392 enable_si_irq(smi_info);
394 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
395 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
396 smi_info->curr_msg->data_size = 2;
398 smi_info->handlers->start_transaction(
400 smi_info->curr_msg->data,
401 smi_info->curr_msg->data_size);
402 smi_info->si_state = SI_GETTING_MESSAGES;
403 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
404 /* Events available. */
405 smi_info->curr_msg = ipmi_alloc_smi_msg();
406 if (!smi_info->curr_msg) {
407 disable_si_irq(smi_info);
408 smi_info->si_state = SI_NORMAL;
411 enable_si_irq(smi_info);
413 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
414 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
415 smi_info->curr_msg->data_size = 2;
417 smi_info->handlers->start_transaction(
419 smi_info->curr_msg->data,
420 smi_info->curr_msg->data_size);
421 smi_info->si_state = SI_GETTING_EVENTS;
422 } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
423 if (smi_info->oem_data_avail_handler)
424 if (smi_info->oem_data_avail_handler(smi_info))
427 smi_info->si_state = SI_NORMAL;
431 static void handle_transaction_done(struct smi_info *smi_info)
433 struct ipmi_smi_msg *msg;
438 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
440 switch (smi_info->si_state) {
442 if (!smi_info->curr_msg)
445 smi_info->curr_msg->rsp_size
446 = smi_info->handlers->get_result(
448 smi_info->curr_msg->rsp,
449 IPMI_MAX_MSG_LENGTH);
451 /* Do this here becase deliver_recv_msg() releases the
452 lock, and a new message can be put in during the
453 time the lock is released. */
454 msg = smi_info->curr_msg;
455 smi_info->curr_msg = NULL;
456 deliver_recv_msg(smi_info, msg);
459 case SI_GETTING_FLAGS:
461 unsigned char msg[4];
464 /* We got the flags from the SMI, now handle them. */
465 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
467 /* Error fetching flags, just give up for
469 smi_info->si_state = SI_NORMAL;
470 } else if (len < 4) {
471 /* Hmm, no flags. That's technically illegal, but
472 don't use uninitialized data. */
473 smi_info->si_state = SI_NORMAL;
475 smi_info->msg_flags = msg[3];
476 handle_flags(smi_info);
481 case SI_CLEARING_FLAGS:
482 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
484 unsigned char msg[3];
486 /* We cleared the flags. */
487 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
489 /* Error clearing flags */
491 "ipmi_si: Error clearing flags: %2.2x\n",
494 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
495 start_enable_irq(smi_info);
497 smi_info->si_state = SI_NORMAL;
501 case SI_GETTING_EVENTS:
503 smi_info->curr_msg->rsp_size
504 = smi_info->handlers->get_result(
506 smi_info->curr_msg->rsp,
507 IPMI_MAX_MSG_LENGTH);
509 /* Do this here becase deliver_recv_msg() releases the
510 lock, and a new message can be put in during the
511 time the lock is released. */
512 msg = smi_info->curr_msg;
513 smi_info->curr_msg = NULL;
514 if (msg->rsp[2] != 0) {
515 /* Error getting event, probably done. */
518 /* Take off the event flag. */
519 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
520 handle_flags(smi_info);
522 spin_lock(&smi_info->count_lock);
524 spin_unlock(&smi_info->count_lock);
526 /* Do this before we deliver the message
527 because delivering the message releases the
528 lock and something else can mess with the
530 handle_flags(smi_info);
532 deliver_recv_msg(smi_info, msg);
537 case SI_GETTING_MESSAGES:
539 smi_info->curr_msg->rsp_size
540 = smi_info->handlers->get_result(
542 smi_info->curr_msg->rsp,
543 IPMI_MAX_MSG_LENGTH);
545 /* Do this here becase deliver_recv_msg() releases the
546 lock, and a new message can be put in during the
547 time the lock is released. */
548 msg = smi_info->curr_msg;
549 smi_info->curr_msg = NULL;
550 if (msg->rsp[2] != 0) {
551 /* Error getting event, probably done. */
554 /* Take off the msg flag. */
555 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
556 handle_flags(smi_info);
558 spin_lock(&smi_info->count_lock);
559 smi_info->incoming_messages++;
560 spin_unlock(&smi_info->count_lock);
562 /* Do this before we deliver the message
563 because delivering the message releases the
564 lock and something else can mess with the
566 handle_flags(smi_info);
568 deliver_recv_msg(smi_info, msg);
573 case SI_ENABLE_INTERRUPTS1:
575 unsigned char msg[4];
577 /* We got the flags from the SMI, now handle them. */
578 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
581 "ipmi_si: Could not enable interrupts"
582 ", failed get, using polled mode.\n");
583 smi_info->si_state = SI_NORMAL;
585 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
586 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
587 msg[2] = msg[3] | 1; /* enable msg queue int */
588 smi_info->handlers->start_transaction(
589 smi_info->si_sm, msg, 3);
590 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
595 case SI_ENABLE_INTERRUPTS2:
597 unsigned char msg[4];
599 /* We got the flags from the SMI, now handle them. */
600 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603 "ipmi_si: Could not enable interrupts"
604 ", failed set, using polled mode.\n");
606 smi_info->si_state = SI_NORMAL;
612 /* Called on timeouts and events. Timeouts should pass the elapsed
613 time, interrupts should pass in zero. */
614 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
617 enum si_sm_result si_sm_result;
620 /* There used to be a loop here that waited a little while
621 (around 25us) before giving up. That turned out to be
622 pointless, the minimum delays I was seeing were in the 300us
623 range, which is far too long to wait in an interrupt. So
624 we just run until the state machine tells us something
625 happened or it needs a delay. */
626 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
628 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
630 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
633 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
635 spin_lock(&smi_info->count_lock);
636 smi_info->complete_transactions++;
637 spin_unlock(&smi_info->count_lock);
639 handle_transaction_done(smi_info);
640 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
642 else if (si_sm_result == SI_SM_HOSED)
644 spin_lock(&smi_info->count_lock);
645 smi_info->hosed_count++;
646 spin_unlock(&smi_info->count_lock);
648 /* Do the before return_hosed_msg, because that
649 releases the lock. */
650 smi_info->si_state = SI_NORMAL;
651 if (smi_info->curr_msg != NULL) {
652 /* If we were handling a user message, format
653 a response to send to the upper layer to
654 tell it about the error. */
655 return_hosed_msg(smi_info);
657 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
660 /* We prefer handling attn over new messages. */
661 if (si_sm_result == SI_SM_ATTN)
663 unsigned char msg[2];
665 spin_lock(&smi_info->count_lock);
666 smi_info->attentions++;
667 spin_unlock(&smi_info->count_lock);
669 /* Got a attn, send down a get message flags to see
670 what's causing it. It would be better to handle
671 this in the upper layer, but due to the way
672 interrupts work with the SMI, that's not really
674 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
675 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
677 smi_info->handlers->start_transaction(
678 smi_info->si_sm, msg, 2);
679 smi_info->si_state = SI_GETTING_FLAGS;
683 /* If we are currently idle, try to start the next message. */
684 if (si_sm_result == SI_SM_IDLE) {
685 spin_lock(&smi_info->count_lock);
687 spin_unlock(&smi_info->count_lock);
689 si_sm_result = start_next_msg(smi_info);
690 if (si_sm_result != SI_SM_IDLE)
694 if ((si_sm_result == SI_SM_IDLE)
695 && (atomic_read(&smi_info->req_events)))
697 /* We are idle and the upper layer requested that I fetch
699 unsigned char msg[2];
701 spin_lock(&smi_info->count_lock);
702 smi_info->flag_fetches++;
703 spin_unlock(&smi_info->count_lock);
705 atomic_set(&smi_info->req_events, 0);
706 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
707 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
709 smi_info->handlers->start_transaction(
710 smi_info->si_sm, msg, 2);
711 smi_info->si_state = SI_GETTING_FLAGS;
718 static void sender(void *send_info,
719 struct ipmi_smi_msg *msg,
722 struct smi_info *smi_info = send_info;
723 enum si_sm_result result;
729 spin_lock_irqsave(&(smi_info->msg_lock), flags);
732 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
735 if (smi_info->run_to_completion) {
736 /* If we are running to completion, then throw it in
737 the list and run transactions until everything is
738 clear. Priority doesn't matter here. */
739 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
741 /* We have to release the msg lock and claim the smi
742 lock in this case, because of race conditions. */
743 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
745 spin_lock_irqsave(&(smi_info->si_lock), flags);
746 result = smi_event_handler(smi_info, 0);
747 while (result != SI_SM_IDLE) {
748 udelay(SI_SHORT_TIMEOUT_USEC);
749 result = smi_event_handler(smi_info,
750 SI_SHORT_TIMEOUT_USEC);
752 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
756 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
758 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
761 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
763 spin_lock_irqsave(&(smi_info->si_lock), flags);
764 if ((smi_info->si_state == SI_NORMAL)
765 && (smi_info->curr_msg == NULL))
767 start_next_msg(smi_info);
768 si_restart_short_timer(smi_info);
770 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
773 static void set_run_to_completion(void *send_info, int i_run_to_completion)
775 struct smi_info *smi_info = send_info;
776 enum si_sm_result result;
779 spin_lock_irqsave(&(smi_info->si_lock), flags);
781 smi_info->run_to_completion = i_run_to_completion;
782 if (i_run_to_completion) {
783 result = smi_event_handler(smi_info, 0);
784 while (result != SI_SM_IDLE) {
785 udelay(SI_SHORT_TIMEOUT_USEC);
786 result = smi_event_handler(smi_info,
787 SI_SHORT_TIMEOUT_USEC);
791 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
794 static int ipmi_thread(void *data)
796 struct smi_info *smi_info = data;
798 enum si_sm_result smi_result;
800 set_user_nice(current, 19);
801 while (!kthread_should_stop()) {
802 spin_lock_irqsave(&(smi_info->si_lock), flags);
803 smi_result=smi_event_handler(smi_info, 0);
804 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
805 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
808 else if (smi_result == SI_SM_CALL_WITH_DELAY)
811 schedule_timeout_interruptible(1);
/* Give the state machine one polled pass (no elapsed time). */
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;

	smi_event_handler(smi_info, 0);
}
824 static void request_events(void *send_info)
826 struct smi_info *smi_info = send_info;
828 atomic_set(&smi_info->req_events, 1);
831 static int initialized = 0;
/* Must be called with interrupts off and with the si_lock held.
   Re-arm the interface timer for a short (sub-jiffy) timeout; only
   meaningful when high-resolution timers are available, otherwise a
   no-op. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
	unsigned long flags;
	unsigned long jiffies_now;
	unsigned long seq;

	if (del_timer(&(smi_info->si_timer))) {
		/* If we don't delete the timer, then it will go off
		   immediately, anyway.  So we only process if we
		   actually delete the timer. */

		do {
			seq = read_seqbegin_irqsave(&xtime_lock, flags);
			jiffies_now = jiffies;
			smi_info->si_timer.expires = jiffies_now;
			smi_info->si_timer.arch_cycle_expires
				= get_arch_cycles(jiffies_now);
		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

		add_timer(&(smi_info->si_timer));
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->timeout_restarts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
	}
#endif
}
864 static void smi_timeout(unsigned long data)
866 struct smi_info *smi_info = (struct smi_info *) data;
867 enum si_sm_result smi_result;
869 unsigned long jiffies_now;
875 if (atomic_read(&smi_info->stop_operation))
878 spin_lock_irqsave(&(smi_info->si_lock), flags);
881 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
883 jiffies_now = jiffies;
884 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
885 * SI_USEC_PER_JIFFY);
886 smi_result = smi_event_handler(smi_info, time_diff);
888 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
890 smi_info->last_timeout_jiffies = jiffies_now;
892 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
893 /* Running with interrupts, only do long timeouts. */
894 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
895 spin_lock_irqsave(&smi_info->count_lock, flags);
896 smi_info->long_timeouts++;
897 spin_unlock_irqrestore(&smi_info->count_lock, flags);
901 /* If the state machine asks for a short delay, then shorten
902 the timer timeout. */
903 if (smi_result == SI_SM_CALL_WITH_DELAY) {
904 #if defined(CONFIG_HIGH_RES_TIMERS)
907 spin_lock_irqsave(&smi_info->count_lock, flags);
908 smi_info->short_timeouts++;
909 spin_unlock_irqrestore(&smi_info->count_lock, flags);
910 #if defined(CONFIG_HIGH_RES_TIMERS)
912 seq = read_seqbegin_irqsave(&xtime_lock, flags);
913 smi_info->si_timer.expires = jiffies;
914 smi_info->si_timer.arch_cycle_expires
915 = get_arch_cycles(smi_info->si_timer.expires);
916 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
917 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
919 smi_info->si_timer.expires = jiffies + 1;
922 spin_lock_irqsave(&smi_info->count_lock, flags);
923 smi_info->long_timeouts++;
924 spin_unlock_irqrestore(&smi_info->count_lock, flags);
925 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
926 #if defined(CONFIG_HIGH_RES_TIMERS)
927 smi_info->si_timer.arch_cycle_expires = 0;
932 add_timer(&(smi_info->si_timer));
935 static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
937 struct smi_info *smi_info = data;
943 spin_lock_irqsave(&(smi_info->si_lock), flags);
945 spin_lock(&smi_info->count_lock);
946 smi_info->interrupts++;
947 spin_unlock(&smi_info->count_lock);
949 if (atomic_read(&smi_info->stop_operation))
954 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
956 smi_event_handler(smi_info, 0);
958 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
962 static irqreturn_t si_bt_irq_handler(int irq, void *data, struct pt_regs *regs)
964 struct smi_info *smi_info = data;
965 /* We need to clear the IRQ flag for the BT interface. */
966 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
967 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
968 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
969 return si_irq_handler(irq, data, regs);
973 static struct ipmi_smi_handlers handlers =
975 .owner = THIS_MODULE,
977 .request_events = request_events,
978 .set_run_to_completion = set_run_to_completion,
982 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
983 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
985 #define SI_MAX_PARMS 4
986 static LIST_HEAD(smi_infos);
987 static DECLARE_MUTEX(smi_infos_lock);
988 static int smi_num; /* Used to sequence the SMIs */
990 #define DEVICE_NAME "ipmi_si"
992 #define DEFAULT_REGSPACING 1
994 static int si_trydefaults = 1;
995 static char *si_type[SI_MAX_PARMS];
996 #define MAX_SI_TYPE_STR 30
997 static char si_type_str[MAX_SI_TYPE_STR];
998 static unsigned long addrs[SI_MAX_PARMS];
999 static int num_addrs;
1000 static unsigned int ports[SI_MAX_PARMS];
1001 static int num_ports;
1002 static int irqs[SI_MAX_PARMS];
1003 static int num_irqs;
1004 static int regspacings[SI_MAX_PARMS];
1005 static int num_regspacings = 0;
1006 static int regsizes[SI_MAX_PARMS];
1007 static int num_regsizes = 0;
1008 static int regshifts[SI_MAX_PARMS];
1009 static int num_regshifts = 0;
1010 static int slave_addrs[SI_MAX_PARMS];
1011 static int num_slave_addrs = 0;
1014 module_param_named(trydefaults, si_trydefaults, bool, 0);
1015 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1016 " default scan of the KCS and SMIC interface at the standard"
1018 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1019 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1020 " interface separated by commas. The types are 'kcs',"
1021 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1022 " the first interface to kcs and the second to bt");
1023 module_param_array(addrs, long, &num_addrs, 0);
1024 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1025 " addresses separated by commas. Only use if an interface"
1026 " is in memory. Otherwise, set it to zero or leave"
1028 module_param_array(ports, int, &num_ports, 0);
1029 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1030 " addresses separated by commas. Only use if an interface"
1031 " is a port. Otherwise, set it to zero or leave"
1033 module_param_array(irqs, int, &num_irqs, 0);
1034 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1035 " addresses separated by commas. Only use if an interface"
1036 " has an interrupt. Otherwise, set it to zero or leave"
1038 module_param_array(regspacings, int, &num_regspacings, 0);
1039 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1040 " and each successive register used by the interface. For"
1041 " instance, if the start address is 0xca2 and the spacing"
1042 " is 2, then the second address is at 0xca4. Defaults"
1044 module_param_array(regsizes, int, &num_regsizes, 0);
1045 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1046 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1047 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1048 " the 8-bit IPMI register has to be read from a larger"
1050 module_param_array(regshifts, int, &num_regshifts, 0);
1051 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1052 " IPMI register, in bits. For instance, if the data"
1053 " is read from a 32-bit word and the IPMI data is in"
1054 " bit 8-15, then the shift would be 8");
1055 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1056 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1057 " the controller. Normally this is 0x20, but can be"
1058 " overridden by this parm. This is an array indexed"
1059 " by interface number.");
1062 #define IPMI_IO_ADDR_SPACE 0
1063 #define IPMI_MEM_ADDR_SPACE 1
1064 static char *addr_space_to_str[] = { "I/O", "memory" };
1066 static void std_irq_cleanup(struct smi_info *info)
1068 if (info->si_type == SI_BT)
1069 /* Disable the interrupt in the BT interface. */
1070 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1071 free_irq(info->irq, info);
1074 static int std_irq_setup(struct smi_info *info)
1081 if (info->si_type == SI_BT) {
1082 rv = request_irq(info->irq,
1088 /* Enable the interrupt in the BT interface. */
1089 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1090 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1092 rv = request_irq(info->irq,
1099 "ipmi_si: %s unable to claim interrupt %d,"
1100 " running polled\n",
1101 DEVICE_NAME, info->irq);
1104 info->irq_cleanup = std_irq_cleanup;
1105 printk(" Using irq %d\n", info->irq);
1111 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1113 unsigned int addr = io->addr_data;
1115 return inb(addr + (offset * io->regspacing));
1118 static void port_outb(struct si_sm_io *io, unsigned int offset,
1121 unsigned int addr = io->addr_data;
1123 outb(b, addr + (offset * io->regspacing));
1126 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1128 unsigned int addr = io->addr_data;
1130 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1133 static void port_outw(struct si_sm_io *io, unsigned int offset,
1136 unsigned int addr = io->addr_data;
1138 outw(b << io->regshift, addr + (offset * io->regspacing));
1141 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1143 unsigned int addr = io->addr_data;
1145 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1148 static void port_outl(struct si_sm_io *io, unsigned int offset,
1151 unsigned int addr = io->addr_data;
1153 outl(b << io->regshift, addr+(offset * io->regspacing));
1156 static void port_cleanup(struct smi_info *info)
1158 unsigned int addr = info->io.addr_data;
1162 mapsize = ((info->io_size * info->io.regspacing)
1163 - (info->io.regspacing - info->io.regsize));
1165 release_region (addr, mapsize);
/*
 * Claim the I/O-port region for this interface and select the
 * port accessor pair matching the configured register size.
 * Returns 0 on success or a negative errno.
 */
static int port_setup(struct smi_info *info)
	unsigned int addr = info->io.addr_data;

	info->io_cleanup = port_cleanup;

	/* Figure out the actual inb/inw/inl/etc routine to use based
	   upon the register size. */
	switch (info->io.regsize) {
		/* regsize == 1: byte accessors */
		info->io.inputb = port_inb;
		info->io.outputb = port_outb;
		/* regsize == 2: word accessors */
		info->io.inputb = port_inw;
		info->io.outputb = port_outw;
		/* regsize == 4: dword accessors */
		info->io.inputb = port_inl;
		info->io.outputb = port_outl;
		/* any other size is rejected */
		printk("ipmi_si: Invalid register size: %d\n",

	/* Calculate the total amount of memory to claim. This is an
	 * unusual looking calculation, but it avoids claiming any
	 * more memory than it has to. It will claim everything
	 * between the first address to the end of the last full
	 */
	mapsize = ((info->io_size * info->io.regspacing)
		   - (info->io.regspacing - info->io.regsize));

	if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
/*
 * Accessors for memory-mapped BMC registers (ioremap'ed in
 * mem_setup()).  Same offset/regspacing/regshift conventions as the
 * port accessors above.
 */
static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
	return readb((io->addr)+(offset * io->regspacing));

static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
	writeb(b, (io->addr)+(offset * io->regspacing));

/* 16-bit read; extract the addressed byte via regshift. */
static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
	return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)

static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
	/* NOTE(review): this 16-bit variant issues writeb() while the
	   32-bit variant below uses writel(); only one byte reaches the
	   device.  Looks like it should be writew() -- confirm against
	   hardware that uses 16-bit memory-mapped registers. */
	writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));

/* 32-bit read; extract the addressed byte via regshift. */
static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
	return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)

static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
	writel(b << io->regshift, (io->addr)+(offset * io->regspacing));

/* 64-bit accessors; only compiled where readq/writeq exist. */
static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
	return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)

static void mem_outq(struct si_sm_io *io, unsigned int offset,
	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* Undo mem_setup(): unmap the registers and release the region. */
static void mem_cleanup(struct smi_info *info)
	unsigned long addr = info->io.addr_data;

	if (info->io.addr) {
		iounmap(info->io.addr);

	/* Must match the size computed in mem_setup(). */
	mapsize = ((info->io_size * info->io.regspacing)
		   - (info->io.regspacing - info->io.regsize));

	release_mem_region(addr, mapsize);
/*
 * Claim and ioremap the memory-mapped register window and select the
 * accessor pair matching the configured register size.  Returns 0 on
 * success or a negative errno; on ioremap failure the region is
 * released again.
 */
static int mem_setup(struct smi_info *info)
	unsigned long addr = info->io.addr_data;

	info->io_cleanup = mem_cleanup;

	/* Figure out the actual readb/readw/readl/etc routine to use based
	   upon the register size. */
	switch (info->io.regsize) {
		/* regsize == 1 */
		info->io.inputb = intf_mem_inb;
		info->io.outputb = intf_mem_outb;
		/* regsize == 2 */
		info->io.inputb = intf_mem_inw;
		info->io.outputb = intf_mem_outw;
		/* regsize == 4 */
		info->io.inputb = intf_mem_inl;
		info->io.outputb = intf_mem_outl;
		/* regsize == 8, where readq/writeq are available */
		info->io.inputb = mem_inq;
		info->io.outputb = mem_outq;
		/* any other size is rejected */
		printk("ipmi_si: Invalid register size: %d\n",

	/* Calculate the total amount of memory to claim. This is an
	 * unusual looking calculation, but it avoids claiming any
	 * more memory than it has to. It will claim everything
	 * between the first address to the end of the last full
	 */
	mapsize = ((info->io_size * info->io.regspacing)
		   - (info->io.regspacing - info->io.regsize));

	if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)

	info->io.addr = ioremap(addr, mapsize);
	if (info->io.addr == NULL) {
		release_mem_region(addr, mapsize);
/*
 * Register interfaces described by the module parameters
 * (ports=/addrs=/irqs=/si_type=/regspacings=/regsizes=/regshifts=).
 * One smi_info is allocated per slot that has a port or address set.
 */
static __devinit void hardcode_find_bmc(void)
	struct smi_info *info;

	for (i = 0; i < SI_MAX_PARMS; i++) {
		/* Skip slots with neither a port nor a memory address. */
		if (!ports[i] && !addrs[i])

		info = kzalloc(sizeof(*info), GFP_KERNEL);

		info->addr_source = "hardcoded";

		/* Default to KCS when no type was given for this slot. */
		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
			info->si_type = SI_KCS;
		} else if (strcmp(si_type[i], "smic") == 0) {
			info->si_type = SI_SMIC;
		} else if (strcmp(si_type[i], "bt") == 0) {
			info->si_type = SI_BT;
			"ipmi_si: Interface type specified "
			"for interface %d, was invalid: %s\n",

		/* Port takes precedence; otherwise use the memory address. */
			info->io_setup = port_setup;
			info->io.addr_data = ports[i];
			info->io.addr_type = IPMI_IO_ADDR_SPACE;
		} else if (addrs[i]) {
			info->io_setup = mem_setup;
			info->io.addr_data = addrs[i];
			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
			"ipmi_si: Interface type specified "
			"for interface %d, "
			"but port and address were not set or "
			"set to zero.\n", i);

		info->io.addr = NULL;
		info->io.regspacing = regspacings[i];
		if (!info->io.regspacing)
			info->io.regspacing = DEFAULT_REGSPACING;
		info->io.regsize = regsizes[i];
		if (!info->io.regsize)
			info->io.regsize = DEFAULT_REGSPACING;
		info->io.regshift = regshifts[i];
		/* IRQ of 0 means polled operation. */
		info->irq = irqs[i];
			info->irq_setup = std_irq_setup;
1404 #include <linux/acpi.h>
/* Once we get an ACPI failure, we don't try any more, because we go
   through the tables sequentially. Once we don't find a table, there
   is no point continuing -- latch the failure here. */
static int acpi_failure = 0;
/* For GPE-type interrupts. */
/* ACPI GPE handler: bumps the interrupt counter and kicks the SMI
   state machine, unless the driver is shutting down. */
static u32 ipmi_acpi_gpe(void *context)
	struct smi_info *smi_info = context;
	unsigned long   flags;

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	spin_lock(&smi_info->count_lock);
	smi_info->interrupts++;
	spin_unlock(&smi_info->count_lock);

	/* Ignore the event if teardown is in progress. */
	if (atomic_read(&smi_info->stop_operation))

	/* Debug-timing path. NOTE(review): t.tv_sec is a long, so "%d"
	   is a format mismatch on 64-bit -- confirm before changing. */
	do_gettimeofday(&t);
	printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);

	smi_event_handler(smi_info, 0);

	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

	return ACPI_INTERRUPT_HANDLED;
/* Remove the GPE handler installed by acpi_gpe_irq_setup(). */
static void acpi_gpe_irq_cleanup(struct smi_info *info)
	acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);

/* Install ipmi_acpi_gpe() as the handler for this interface's GPE.
   Falls back to polled operation (and clears info->irq) on failure. */
static int acpi_gpe_irq_setup(struct smi_info *info)
	/* FIXME - is level triggered right? */
	status = acpi_install_gpe_handler(NULL,
					  ACPI_GPE_LEVEL_TRIGGERED,
	if (status != AE_OK) {
		       "ipmi_si: %s unable to claim ACPI GPE %d,"
		       " running polled\n",
		       DEVICE_NAME, info->irq);

		info->irq_cleanup = acpi_gpe_irq_cleanup;
		printk("  Using ACPI GPE %d\n", info->irq);
 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
	s8	CreatorRevision[4];
	/* SPMI spec revision this table conforms to. */
	s16	SpecificationRevision;

	/*
	 * Bit 0 - SCI interrupt supported
	 * Bit 1 - I/O APIC/SAPIC
	 */

	/* If bit 0 of InterruptType is set, then this is the SCI
	   interrupt in the GPEx_STS register. */

	/* If bit 1 of InterruptType is set, then this is the I/O
	   APIC/SAPIC interrupt. */
	u32	GlobalSystemInterrupt;

	/* The actual register address. */
	struct acpi_generic_address addr;

	s8	spmi_id[1]; /* A '\0' terminated array starts here. */
1517 static __devinit int try_init_acpi(struct SPMITable *spmi)
1519 struct smi_info *info;
1523 if (spmi->IPMIlegacy != 1) {
1524 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1528 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1529 addr_space = IPMI_MEM_ADDR_SPACE;
1531 addr_space = IPMI_IO_ADDR_SPACE;
1533 info = kzalloc(sizeof(*info), GFP_KERNEL);
1535 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1539 info->addr_source = "ACPI";
1541 /* Figure out the interface type. */
1542 switch (spmi->InterfaceType)
1545 info->si_type = SI_KCS;
1548 info->si_type = SI_SMIC;
1551 info->si_type = SI_BT;
1554 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1555 spmi->InterfaceType);
1560 if (spmi->InterruptType & 1) {
1561 /* We've got a GPE interrupt. */
1562 info->irq = spmi->GPE;
1563 info->irq_setup = acpi_gpe_irq_setup;
1564 } else if (spmi->InterruptType & 2) {
1565 /* We've got an APIC/SAPIC interrupt. */
1566 info->irq = spmi->GlobalSystemInterrupt;
1567 info->irq_setup = std_irq_setup;
1569 /* Use the default interrupt setting. */
1571 info->irq_setup = NULL;
1574 if (spmi->addr.register_bit_width) {
1575 /* A (hopefully) properly formed register bit width. */
1576 info->io.regspacing = spmi->addr.register_bit_width / 8;
1578 info->io.regspacing = DEFAULT_REGSPACING;
1580 info->io.regsize = info->io.regspacing;
1581 info->io.regshift = spmi->addr.register_bit_offset;
1583 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1585 info->io_setup = mem_setup;
1586 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1587 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1589 info->io_setup = port_setup;
1590 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1593 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1596 info->io.addr_data = spmi->addr.address;
/* Walk the firmware's SPMI tables (instance 1, 2, ...) and try to
   initialize an interface from each until lookup fails. */
static __devinit void acpi_find_bmc(void)
	struct SPMITable *spmi;

	for (i = 0; ; i++) {
		status = acpi_get_firmware_table("SPMI", i+1,
						 ACPI_LOGICAL_ADDRESSING,
						 (struct acpi_table_header **)
		/* No more tables (or ACPI failure): stop scanning. */
		if (status != AE_OK)

		try_init_acpi(spmi);
/* Decoded form of an SMBIOS type-38 (IPMI device) record. */
struct dmi_ipmi_data
	unsigned long base_addr;

/*
 * Parse an SMBIOS IPMI record into *dmi.  Returns 0 on success.
 * The raw record layout follows the SMBIOS spec: byte 4 = interface
 * type, bytes 8.. = base address, byte 6 = slave address, byte 0x10 =
 * modifier flags, byte 0x11 = IRQ.
 */
static int __devinit decode_dmi(struct dmi_header *dm,
				struct dmi_ipmi_data *dmi)
	u8		*data = (u8 *)dm;
	unsigned long  	base_addr;
	u8		len = dm->length;

	dmi->type = data[4];

	memcpy(&base_addr, data+8, sizeof(unsigned long));

	/* Low bit of the base address selects I/O vs memory space. */
	if (base_addr & 1) {
		base_addr &= 0xFFFE;
		dmi->addr_space = IPMI_IO_ADDR_SPACE;

		dmi->addr_space = IPMI_MEM_ADDR_SPACE;

	/* If bit 4 of byte 0x10 is set, then the lsb for the address
	   is odd. */
	dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

	dmi->irq = data[0x11];

	/* The top two bits of byte 0x10 hold the register spacing. */
	reg_spacing = (data[0x10] & 0xC0) >> 6;
	switch(reg_spacing){
	case 0x00: /* Byte boundaries */

	case 0x01: /* 32-bit boundaries */

	case 0x02: /* 16-byte boundaries */

		/* Some other interface, just ignore it. */

	/* Older, short records don't carry the modifier byte. */
	/* Note that technically, the lower bit of the base
	 * address should be 1 if the address is I/O and 0 if
	 * the address is in memory. So many systems get that
	 * wrong (and all that I have seen are I/O) so we just
	 * ignore that bit and assume I/O. Systems that use
	 * memory should use the newer spec, anyway. */
	dmi->base_addr = base_addr & 0xfffe;
	dmi->addr_space = IPMI_IO_ADDR_SPACE;

	dmi->slave_addr = data[6];
/* Build a smi_info from a decoded SMBIOS record and register it. */
static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
	struct smi_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
		       "ipmi_si: Could not allocate SI data\n");

	info->addr_source = "SMBIOS";

	/* SMBIOS interface-type codes -> driver state machines. */
	switch (ipmi_data->type) {
	case 0x01: /* KCS */
		info->si_type = SI_KCS;
	case 0x02: /* SMIC */
		info->si_type = SI_SMIC;
		info->si_type = SI_BT;

	switch (ipmi_data->addr_space) {
	case IPMI_MEM_ADDR_SPACE:
		info->io_setup = mem_setup;
		info->io.addr_type = IPMI_MEM_ADDR_SPACE;

	case IPMI_IO_ADDR_SPACE:
		info->io_setup = port_setup;
		info->io.addr_type = IPMI_IO_ADDR_SPACE;

		       "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
		       ipmi_data->addr_space);

	info->io.addr_data = ipmi_data->base_addr;

	/* "offset" from the SMBIOS record is the register spacing. */
	info->io.regspacing = ipmi_data->offset;
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = 0;

	info->slave_addr = ipmi_data->slave_addr;

	/* IRQ of 0 means polled operation. */
	info->irq = ipmi_data->irq;
		info->irq_setup = std_irq_setup;
/* Scan the DMI device list for IPMI entries and try each one. */
static void __devinit dmi_find_bmc(void)
	struct dmi_device    *dev = NULL;
	struct dmi_ipmi_data data;

	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
		rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
		/* Only register records that decoded cleanly. */
			try_init_dmi(&data);
1774 #endif /* CONFIG_DMI */
/* PCI class code for ERMC (Embedded/Remote Management Controller)
   devices; the low byte encodes the SMI interface type. */
#define PCI_ERMC_CLASSCODE 0x0C0700
#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02

/* HP Management Controller, matched explicitly by vendor/device ID. */
#define PCI_HP_VENDOR_ID    0x103C
#define PCI_MMC_DEVICE_ID   0x121A
#define PCI_MMC_ADDR_CW     0x10
/* addr_source_cleanup hook for PCI interfaces: disable the device
   that was enabled in ipmi_pci_probe(). */
static void ipmi_pci_cleanup(struct smi_info *info)
	struct pci_dev *pdev = info->addr_source_data;

	pci_disable_device(pdev);
/*
 * PCI probe: identify the SMI type from the low byte of the class
 * code, enable the device, and register BAR 0 as the interface's
 * register window (I/O or memory, as the resource flags say).
 */
static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
	int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
	struct smi_info *info;
	int first_reg_offset = 0;

	info = kzalloc(sizeof(*info), GFP_KERNEL);

	info->addr_source = "PCI";

	switch (class_type) {
	case PCI_ERMC_CLASSCODE_TYPE_SMIC:
		info->si_type = SI_SMIC;

	case PCI_ERMC_CLASSCODE_TYPE_KCS:
		info->si_type = SI_KCS;

	case PCI_ERMC_CLASSCODE_TYPE_BT:
		info->si_type = SI_BT;

		printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
		       pci_name(pdev), class_type);

	rv = pci_enable_device(pdev);
		printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",

	info->addr_source_cleanup = ipmi_pci_cleanup;
	info->addr_source_data = pdev;

	/* HP's controller skips the first register.
	   NOTE(review): first_reg_offset is set here but its use is not
	   visible in this view -- confirm it is applied downstream. */
	if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
		first_reg_offset = 1;

	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
		info->io_setup = port_setup;
		info->io.addr_type = IPMI_IO_ADDR_SPACE;
		info->io_setup = mem_setup;
		info->io.addr_type = IPMI_MEM_ADDR_SPACE;
	info->io.addr_data = pci_resource_start(pdev, 0);

	info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = 0;

	info->irq = pdev->irq;
		info->irq_setup = std_irq_setup;

	return try_smi_init(info);
/* PCI hot-remove hook for an IPMI interface. */
static void __devexit ipmi_pci_remove(struct pci_dev *pdev)

/* Power-management hooks; bodies not visible in this view --
   presumably stubs. TODO(review): confirm. */
static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)

static int ipmi_pci_resume(struct pci_dev *pdev)
/* Match the HP management controller explicitly, plus any device
   advertising the ERMC class code. */
static struct pci_device_id ipmi_pci_devices[] = {
	{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
	{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) }
MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);

static struct pci_driver ipmi_pci_driver = {
        .name =         DEVICE_NAME,
        .id_table =     ipmi_pci_devices,
        .probe =        ipmi_pci_probe,
        .remove =       __devexit_p(ipmi_pci_remove),
        .suspend =      ipmi_pci_suspend,
        .resume =       ipmi_pci_resume,
1896 #endif /* CONFIG_PCI */
/*
 * Issue a Get Device ID command by driving the SMI state machine
 * directly (polled, no interrupts yet).  Used during probe both to
 * verify a BMC is present and to capture its identity into
 * smi_info->device_id.  Returns 0 on success, negative errno on
 * failure.
 */
static int try_get_dev_id(struct smi_info *smi_info)
	unsigned char         msg[2];
	unsigned char         *resp;
	unsigned long         resp_len;
	enum si_sm_result     smi_result;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);

	/* Do a Get Device ID command, since it comes back with some
	   useful info. */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);

	/* Poll the state machine, sleeping a tick when it asks for a
	   delay, until it reaches a terminal state. */
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 100);

		else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)

			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);

	if (smi_result == SI_SM_HOSED) {
		/* We couldn't get the state machine to run, so whatever's at
		   the port is probably not an IPMI SMI interface. */

	/* Otherwise, we got some data. */
	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);
		/* That's odd, it should be longer. */

	if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
		/* That's odd, it shouldn't be able to fail. */

	/* Record info from the get device id, in case we need it. */
	memcpy(&smi_info->device_id, &resp[3],
	       min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
/* /proc "type" file: print the interface type as a short string. */
static int type_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
	char            *out = (char *) page;
	struct smi_info *smi = data;

	switch (smi->si_type) {
		return sprintf(out, "kcs\n");
		return sprintf(out, "smic\n");
		return sprintf(out, "bt\n");
/* /proc "si_stats" file: dump the per-interface event counters. */
static int stat_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
	char            *out = (char *) page;
	struct smi_info *smi = data;

	out += sprintf(out, "interrupts_enabled:    %d\n",
		       smi->irq && !smi->interrupt_disabled);
	out += sprintf(out, "short_timeouts:        %ld\n",
		       smi->short_timeouts);
	out += sprintf(out, "long_timeouts:         %ld\n",
		       smi->long_timeouts);
	out += sprintf(out, "timeout_restarts:      %ld\n",
		       smi->timeout_restarts);
	out += sprintf(out, "idles:                 %ld\n",
	out += sprintf(out, "interrupts:            %ld\n",
	out += sprintf(out, "attentions:            %ld\n",
	out += sprintf(out, "flag_fetches:          %ld\n",
	out += sprintf(out, "hosed_count:           %ld\n",
	out += sprintf(out, "complete_transactions: %ld\n",
		       smi->complete_transactions);
	out += sprintf(out, "events:                %ld\n",
	out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
		       smi->watchdog_pretimeouts);
	out += sprintf(out, "incoming_messages:     %ld\n",
		       smi->incoming_messages);

	/* Bytes written into the proc page. */
	return (out - ((char *) page));
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
	/* Clear the OEM bits and raise RECEIVE_MSG_AVAIL instead. */
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2034 * setup_dell_poweredge_oem_data_handler
2035 * @info - smi_info.device_id must be populated
2037 * Systems that match, but have firmware version < 1.40 may assert
2038 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2039 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2040 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2041 * as RECEIVE_MSG_AVAIL instead.
2043 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2044 * assert the OEM[012] bits, and if it did, the driver would have to
2045 * change to handle that properly, we don't actually check for the
2047 * Device ID = 0x20 BMC on PowerEdge 8G servers
2048 * Device Revision = 0x80
2049 * Firmware Revision1 = 0x01 BMC version 1.40
2050 * Firmware Revision2 = 0x40 BCD encoded
2051 * IPMI Version = 0x51 IPMI 1.5
2052 * Manufacturer ID = A2 02 00 Dell IANA
2054 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2055 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
/* Identification constants for the Dell PowerEdge 8G BMC; see the
   rationale in the comment block above. */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
	struct ipmi_device_id *id = &smi_info->device_id;
	const char mfr[3]=DELL_IANA_MFR_ID;
	if (!memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
		/* PowerEdge 8G BMC: OEM flags really mean msg available. */
		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		/* Any Dell BMC older than IPMI 1.5 needs the same quirk. */
		else if (ipmi_version_major(id) < 1 ||
			 (ipmi_version_major(id) == 1 &&
			  ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
/* Complete the current message locally with completion code 0xCA
   ("cannot return number of requested data bytes") instead of sending
   it to the BMC. */
static void return_hosed_msg_badsize(struct smi_info *smi_info)
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
2097 * dell_poweredge_bt_xaction_handler
2098 * @info - smi_info.device_id must be populated
2100 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2101 * not respond to a Get SDR command if the length of the data
2102 * requested is exactly 0x3A, which leads to command timeouts and no
2103 * data returned. This intercepts such commands, and causes userspace
2104 * callers to try again with a different-sized buffer, which succeeds.
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
/* Transaction-start notifier: intercept the pathological Get SDR
   request size (see comment above) and fail it locally so userspace
   retries with a different buffer size. */
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size   = smi_info->curr_msg->data_size;
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
		return_hosed_msg_badsize(smi_info);

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
2131 * setup_dell_poweredge_bt_xaction_handler
2132 * @info - smi_info.device_id must be filled in already
2134 * Fills in smi_info.device_id.start_transaction_pre_hook
2135 * when we know what function to use there.
/* Register the Get SDR workaround only for Dell BMCs using BT. */
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
	struct ipmi_device_id *id = &smi_info->device_id;
	const char mfr[3]=DELL_IANA_MFR_ID;
	if (!memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
	    smi_info->si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2148 * setup_oem_data_handler
2149 * @info - smi_info.device_id must be filled in already
2151 * Fills in smi_info.device_id.oem_data_available_handler
2152 * when we know what function to use there.
/* Dispatch point for OEM data-available quirks (Dell only so far). */
static void setup_oem_data_handler(struct smi_info *smi_info)
	setup_dell_poweredge_oem_data_handler(smi_info);

/* Dispatch point for transaction-start quirks (Dell BT only so far). */
static void setup_xaction_handlers(struct smi_info *smi_info)
	setup_dell_poweredge_bt_xaction_handler(smi_info);
/* Stop the polling kthread (if one was started and kthread_run did
   not fail) and synchronously kill the SI timer. */
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
	if (smi_info->thread != NULL && smi_info->thread != ERR_PTR(-ENOMEM))
		kthread_stop(smi_info->thread);
	del_timer_sync(&smi_info->si_timer);
/* Well-known legacy port assignments tried when nothing else found a
   BMC; the list is terminated by a zero port. */
static struct ipmi_default_vals
} __devinit ipmi_defaults[] =
	{ .type = SI_KCS, .port = 0xca2 },
	{ .type = SI_SMIC, .port = 0xca9 },
	{ .type = SI_BT, .port = 0xe4 },
/* Last-resort probe: try each entry in ipmi_defaults[] and stop at
   the first one that initializes successfully. */
static __devinit void default_find_bmc(void)
	struct smi_info *info;

	for (i = 0; ; i++) {
		/* Zero port terminates the table. */
		if (!ipmi_defaults[i].port)

		info = kzalloc(sizeof(*info), GFP_KERNEL);

		/* NULL marks this as a default (not firmware-reported)
		   interface. */
		info->addr_source = NULL;

		info->si_type = ipmi_defaults[i].type;
		info->io_setup = port_setup;
		info->io.addr_data = ipmi_defaults[i].port;
		info->io.addr_type = IPMI_IO_ADDR_SPACE;

		info->io.addr = NULL;
		info->io.regspacing = DEFAULT_REGSPACING;
		info->io.regsize = DEFAULT_REGSPACING;
		info->io.regshift = 0;

		if (try_smi_init(info) == 0) {
			printk(KERN_INFO "ipmi_si: Found default %s state"
			       " machine at %s address 0x%lx\n",
			       si_to_str[info->si_type],
			       addr_space_to_str[info->io.addr_type],
			       info->io.addr_data);
/* Return nonzero if no registered interface already uses this
   address-space/address pair.  Caller holds smi_infos_lock. */
static int is_new_interface(struct smi_info *info)
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_type != info->io.addr_type)
		if (e->io.addr_data == info->io.addr_data)
/*
 * Bring up one interface end-to-end: pick the state machine, claim
 * I/O, detect the hardware, fetch the device ID, apply OEM quirks,
 * start the timer/thread, register with the IPMI core, and create the
 * proc files.  Returns 0 on success; on any failure everything done
 * so far is unwound via the out_err* paths below.
 */
static int try_smi_init(struct smi_info *new_smi)
	if (new_smi->addr_source) {
		printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
		       " machine at %s address 0x%lx, slave address 0x%x,"
		       new_smi->addr_source,
		       si_to_str[new_smi->si_type],
		       addr_space_to_str[new_smi->io.addr_type],
		       new_smi->io.addr_data,
		       new_smi->slave_addr, new_smi->irq);

	/* Reject a second interface at the same address. */
	down(&smi_infos_lock);
	if (!is_new_interface(new_smi)) {
		printk(KERN_WARNING "ipmi_si: duplicate interface\n");

	/* So we know not to free it unless we have allocated one. */
	new_smi->intf = NULL;
	new_smi->si_sm = NULL;
	new_smi->handlers = NULL;

	switch (new_smi->si_type) {
		new_smi->handlers = &kcs_smi_handlers;

		new_smi->handlers = &smic_smi_handlers;

		new_smi->handlers = &bt_smi_handlers;

		/* No support for anything else yet. */

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		printk(" Could not allocate state machine memory\n");

	new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io_setup(new_smi);
		printk(" Could not set up I/O space\n");

	spin_lock_init(&(new_smi->si_lock));
	spin_lock_init(&(new_smi->msg_lock));
	spin_lock_init(&(new_smi->count_lock));

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->addr_source)
			printk(KERN_INFO "ipmi_si: Interface detection"

	/* Attempt a get device id command.  If it fails, we probably
	   don't have a BMC here. */
	rv = try_get_dev_id(new_smi);
		if (new_smi->addr_source)
			printk(KERN_INFO "ipmi_si: There appears to be no BMC"
			       " at this location\n");

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);

	/* Try to claim any interrupts. */
	if (new_smi->irq_setup)
		new_smi->irq_setup(new_smi);

	INIT_LIST_HEAD(&(new_smi->xmit_msgs));
	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = 0;

	new_smi->interrupt_disabled = 0;
	atomic_set(&new_smi->stop_operation, 0);
	new_smi->intf_num = smi_num;

	/* Start clearing the flags before we enable interrupts or the
	   timer to avoid racing with the timer. */
	start_clear_flags(new_smi);
	/* IRQ is defined to be set when non-zero. */
		new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;

	/* The ipmi_register_smi() code does some operations to
	   determine the channel information, so we must be ready to
	   handle operations before it is called.  This means we have
	   to stop the timer if we get an error after this point. */
	init_timer(&(new_smi->si_timer));
	new_smi->si_timer.data = (long) new_smi;
	new_smi->si_timer.function = smi_timeout;
	new_smi->last_timeout_jiffies = jiffies;
	new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;

	add_timer(&(new_smi->si_timer));
	/* BT has its own state machine pacing; no polling thread. */
	if (new_smi->si_type != SI_BT)
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->intf_num);

	rv = ipmi_register_smi(&handlers,
			       ipmi_version_major(&new_smi->device_id),
			       ipmi_version_minor(&new_smi->device_id),
			       new_smi->slave_addr,
		       "ipmi_si: Unable to register device: error %d\n",
		goto out_err_stop_timer;

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
				     type_file_read_proc, NULL,
				     new_smi, THIS_MODULE);
		       "ipmi_si: Unable to create proc entry: %d\n",
		goto out_err_stop_timer;

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
				     stat_file_read_proc, NULL,
				     new_smi, THIS_MODULE);
		       "ipmi_si: Unable to create proc entry: %d\n",
		goto out_err_stop_timer;

	list_add_tail(&new_smi->link, &smi_infos);

	up(&smi_infos_lock);

	printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);

	/* Error unwind: stop the timer/thread first... */
	atomic_inc(&new_smi->stop_operation);
	wait_for_timer_and_thread(new_smi);

		ipmi_unregister_smi(new_smi->intf);

	if (new_smi->irq_cleanup)
		new_smi->irq_cleanup(new_smi);

	/* Wait until we know that we are out of any interrupt
	   handlers might have been running before we freed the
	   interrupt. */
	synchronize_sched();

	if (new_smi->si_sm) {
		if (new_smi->handlers)
			new_smi->handlers->cleanup(new_smi->si_sm);
		kfree(new_smi->si_sm);
	if (new_smi->addr_source_cleanup)
		new_smi->addr_source_cleanup(new_smi);
	if (new_smi->io_cleanup)
		new_smi->io_cleanup(new_smi);

	up(&smi_infos_lock);
/*
 * Module init: parse parameters, then run every discovery method
 * (hardcoded, ACPI, DMI, PCI); if nothing was found and defaults are
 * allowed, probe the legacy ports.  Fails with an error if no
 * interface exists at all.
 */
static __devinit int init_ipmi_si(void)
	/* Parse out the si_type string into its components. */
		for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
			str = strchr(str, ',');

	printk(KERN_INFO "IPMI System Interface driver.\n");

	hardcode_find_bmc();

	pci_module_init(&ipmi_pci_driver);

	if (si_trydefaults) {
		down(&smi_infos_lock);
		if (list_empty(&smi_infos)) {
			/* No BMC was found, try defaults. */
			up(&smi_infos_lock);
		up(&smi_infos_lock);

	/* If still nothing registered, undo the PCI driver and bail. */
	down(&smi_infos_lock);
	if (list_empty(&smi_infos)) {
		up(&smi_infos_lock);
		pci_unregister_driver(&ipmi_pci_driver);
		printk("ipmi_si: Unable to find any System Interface(s)\n");
	up(&smi_infos_lock);
2498 module_init(init_ipmi_si);
/*
 * Tear down one interface: delist it, quiesce interrupts/timer/thread,
 * drain any in-flight transaction, then unregister and free everything.
 * Caller holds smi_infos_lock.
 */
static void __devexit cleanup_one_si(struct smi_info *to_clean)
	unsigned long    flags;

	list_del(&to_clean->link);

	/* Tell the timer and interrupt handlers that we are shutting
	   down. */
	spin_lock_irqsave(&(to_clean->si_lock), flags);
	spin_lock(&(to_clean->msg_lock));

	atomic_inc(&to_clean->stop_operation);

	if (to_clean->irq_cleanup)
		to_clean->irq_cleanup(to_clean);

	spin_unlock(&(to_clean->msg_lock));
	spin_unlock_irqrestore(&(to_clean->si_lock), flags);

	/* Wait until we know that we are out of any interrupt
	   handlers might have been running before we freed the
	   interrupt. */
	synchronize_sched();

	wait_for_timer_and_thread(to_clean);

	/* Interrupts and timeouts are stopped, now make sure the
	   interface is in a clean state. */
	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		schedule_timeout_uninterruptible(1);

	rv = ipmi_unregister_smi(to_clean->intf);
		       "ipmi_si: Unable to unregister device: errno=%d\n",

	to_clean->handlers->cleanup(to_clean->si_sm);

	kfree(to_clean->si_sm);

	if (to_clean->addr_source_cleanup)
		to_clean->addr_source_cleanup(to_clean);
	if (to_clean->io_cleanup)
		to_clean->io_cleanup(to_clean);
/* Module exit: unregister the PCI driver and tear down every
   remaining interface under smi_infos_lock. */
static __exit void cleanup_ipmi_si(void)
	struct smi_info *e, *tmp_e;

	pci_unregister_driver(&ipmi_pci_driver);

	down(&smi_infos_lock);
	/* _safe variant: cleanup_one_si() removes entries as we walk. */
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
	up(&smi_infos_lock);
2570 module_exit(cleanup_ipmi_si);
2572 MODULE_LICENSE("GPL");
2573 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2574 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");