4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
41 #include <linux/module.h>
42 #include <linux/moduleparam.h>
43 #include <asm/system.h>
44 #include <linux/sched.h>
45 #include <linux/timer.h>
46 #include <linux/errno.h>
47 #include <linux/spinlock.h>
48 #include <linux/slab.h>
49 #include <linux/delay.h>
50 #include <linux/list.h>
51 #include <linux/pci.h>
52 #include <linux/ioport.h>
53 #include <linux/notifier.h>
54 #include <linux/mutex.h>
55 #include <linux/kthread.h>
57 #include <linux/interrupt.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ipmi_smi.h>
61 #include "ipmi_si_sm.h"
62 #include <linux/init.h>
63 #include <linux/dmi.h>
64 #include <linux/string.h>
65 #include <linux/ctype.h>
67 #define PFX "ipmi_si: "
69 /* Measure times between events in the driver. */
72 /* Call every 10 ms. */
73 #define SI_TIMEOUT_TIME_USEC 10000
74 #define SI_USEC_PER_JIFFY (1000000/HZ)
75 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
76 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
84 SI_CLEARING_FLAGS_THEN_SET_IRQ,
86 SI_ENABLE_INTERRUPTS1,
88 /* FIXME - add watchdog stuff. */
91 /* Some BT-specific defines we need here. */
92 #define IPMI_BT_INTMASK_REG 2
93 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
94 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
97 SI_KCS, SI_SMIC, SI_BT
99 static char *si_to_str[] = { "kcs", "smic", "bt" };
101 #define DEVICE_NAME "ipmi_si"
103 static struct device_driver ipmi_driver =
106 .bus = &platform_bus_type
113 struct si_sm_data *si_sm;
114 struct si_sm_handlers *handlers;
115 enum si_type si_type;
118 struct list_head xmit_msgs;
119 struct list_head hp_xmit_msgs;
120 struct ipmi_smi_msg *curr_msg;
121 enum si_intf_state si_state;
123 /* Used to handle the various types of I/O that can occur with
126 int (*io_setup)(struct smi_info *info);
127 void (*io_cleanup)(struct smi_info *info);
128 int (*irq_setup)(struct smi_info *info);
129 void (*irq_cleanup)(struct smi_info *info);
130 unsigned int io_size;
131 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
132 void (*addr_source_cleanup)(struct smi_info *info);
133 void *addr_source_data;
135 /* Per-OEM handler, called from handle_flags().
136 Returns 1 when handle_flags() needs to be re-run
137 or 0 indicating it set si_state itself.
139 int (*oem_data_avail_handler)(struct smi_info *smi_info);
141 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
142 is set to hold the flags until we are done handling everything
144 #define RECEIVE_MSG_AVAIL 0x01
145 #define EVENT_MSG_BUFFER_FULL 0x02
146 #define WDT_PRE_TIMEOUT_INT 0x08
147 #define OEM0_DATA_AVAIL 0x20
148 #define OEM1_DATA_AVAIL 0x40
149 #define OEM2_DATA_AVAIL 0x80
150 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
153 unsigned char msg_flags;
155 /* If set to true, this will request events the next time the
156 state machine is idle. */
159 /* If true, run the state machine to completion on every send
160 call. Generally used after a panic to make sure stuff goes
162 int run_to_completion;
164 /* The I/O port of an SI interface. */
167 /* The space between start addresses of the two ports. For
168 instance, if the first port is 0xca2 and the spacing is 4, then
169 the second port is 0xca6. */
170 unsigned int spacing;
172 /* zero if no irq; */
175 /* The timer for this si. */
176 struct timer_list si_timer;
178 /* The time (in jiffies) the last timeout occurred at. */
179 unsigned long last_timeout_jiffies;
181 /* Used to gracefully stop the timer without race conditions. */
182 atomic_t stop_operation;
184 /* The driver will disable interrupts when it gets into a
185 situation where it cannot handle messages due to lack of
186 memory. Once that situation clears up, it will re-enable
188 int interrupt_disabled;
190 /* From the get device id response... */
191 struct ipmi_device_id device_id;
193 /* Driver model stuff. */
195 struct platform_device *pdev;
197 /* True if we allocated the device, false if it came from
198 * someplace else (like PCI). */
201 /* Slave address, could be reported from DMI. */
202 unsigned char slave_addr;
204 /* Counters and things for the proc filesystem. */
205 spinlock_t count_lock;
206 unsigned long short_timeouts;
207 unsigned long long_timeouts;
208 unsigned long timeout_restarts;
210 unsigned long interrupts;
211 unsigned long attentions;
212 unsigned long flag_fetches;
213 unsigned long hosed_count;
214 unsigned long complete_transactions;
215 unsigned long events;
216 unsigned long watchdog_pretimeouts;
217 unsigned long incoming_messages;
219 struct task_struct *thread;
221 struct list_head link;
224 #define SI_MAX_PARMS 4
226 static int force_kipmid[SI_MAX_PARMS];
227 static int num_force_kipmid;
229 static int unload_when_empty = 1;
231 static int try_smi_init(struct smi_info *smi);
232 static void cleanup_one_si(struct smi_info *to_clean);
234 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
/* Register a notifier that is invoked at the start of each IPMI
   transaction (used e.g. for timing/tracing hooks).  Returns the
   result of atomic_notifier_chain_register(). */
235 static int register_xaction_notifier(struct notifier_block * nb)
237 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
/* Hand a completed message up to the IPMI message handler.  The caller
   holds si_lock; it is dropped around the upcall (the upper layer may
   re-enter this driver) and reacquired before returning. */
240 static void deliver_recv_msg(struct smi_info *smi_info,
241 struct ipmi_smi_msg *msg)
243 /* Deliver the message to the upper layer with the lock released. */
245 spin_unlock(&(smi_info->si_lock));
246 ipmi_smi_msg_received(smi_info->intf, msg);
247 spin_lock(&(smi_info->si_lock));
/* Fabricate an error response for the in-flight message and deliver it
   to the upper layer, so the requester is not left waiting when the
   hardware transaction failed. */
250 static void return_hosed_msg(struct smi_info *smi_info)
252 struct ipmi_smi_msg *msg = smi_info->curr_msg;
254 /* Make it a response; OR-ing 4 into the netfn byte converts the
255    request netfn into the corresponding response netfn. */
255 msg->rsp[0] = msg->data[0] | 4;
256 msg->rsp[1] = msg->data[1];
257 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
260 smi_info->curr_msg = NULL;
261 deliver_recv_msg(smi_info, msg);
/* Dequeue the next outgoing message (high-priority queue first) and
   start it on the low-level state machine.  Returns SI_SM_CALL_WITHOUT_DELAY
   when a transaction was started, so the caller keeps driving the SM.
   Called with interrupts off and si_lock held. */
264 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
267 struct list_head *entry = NULL;
272 /* No need to save flags, we already have interrupts off and we
273 already hold the SMI lock. */
274 spin_lock(&(smi_info->msg_lock));
276 /* Pick the high priority queue first. */
277 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
278 entry = smi_info->hp_xmit_msgs.next;
279 } else if (!list_empty(&(smi_info->xmit_msgs))) {
280 entry = smi_info->xmit_msgs.next;
284 smi_info->curr_msg = NULL;
290 smi_info->curr_msg = list_entry(entry,
295 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
/* Give registered transaction notifiers a chance to veto/observe. */
297 err = atomic_notifier_call_chain(&xaction_notifier_list,
299 if (err & NOTIFY_STOP_MASK) {
300 rv = SI_SM_CALL_WITHOUT_DELAY;
303 err = smi_info->handlers->start_transaction(
305 smi_info->curr_msg->data,
306 smi_info->curr_msg->data_size);
/* start_transaction failure: complete the message with an error. */
308 return_hosed_msg(smi_info);
311 rv = SI_SM_CALL_WITHOUT_DELAY;
314 spin_unlock(&(smi_info->msg_lock));
/* Begin the interrupt-enable sequence: issue Get BMC Global Enables and
   move to SI_ENABLE_INTERRUPTS1; the response is handled in
   handle_transaction_done(), which then sets the enables. */
319 static void start_enable_irq(struct smi_info *smi_info)
321 unsigned char msg[2];
323 /* If we are enabling interrupts, we have to tell the
324    BMC to use them (read-modify-write of the global enables). */
325 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
326 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
328 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
329 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
/* Issue a Clear Message Flags command for the watchdog pre-timeout bit
   and enter SI_CLEARING_FLAGS; used at startup and when handling a
   watchdog pre-timeout. */
332 static void start_clear_flags(struct smi_info *smi_info)
334 unsigned char msg[3];
336 /* Make sure the watchdog pre-timeout flag is not set at startup. */
337 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
338 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
339 msg[2] = WDT_PRE_TIMEOUT_INT;
341 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
342 smi_info->si_state = SI_CLEARING_FLAGS;
345 /* When we have a situation where we run out of memory and cannot
346    allocate messages, we just leave them in the BMC and run the system
347    polled until we can allocate some memory.  Once we have some
348    memory, we will re-enable the interrupt. */
349 static inline void disable_si_irq(struct smi_info *smi_info)
351 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
352 disable_irq_nosync(smi_info->irq);
353 smi_info->interrupt_disabled = 1;
/* Counterpart to disable_si_irq(): re-enable the line once message
   allocation succeeds again.  No-op when no IRQ or already enabled. */
357 static inline void enable_si_irq(struct smi_info *smi_info)
359 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
360 enable_irq(smi_info->irq);
361 smi_info->interrupt_disabled = 0;
/* Act on the message flags previously fetched from the BMC
   (smi_info->msg_flags), in priority order: watchdog pre-timeout,
   then received messages, then events, then OEM data.  Starts the
   appropriate follow-up transaction and sets si_state accordingly.
   Called with si_lock held (dropped around the watchdog upcall). */
365 static void handle_flags(struct smi_info *smi_info)
368 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
369 /* Watchdog pre-timeout */
370 spin_lock(&smi_info->count_lock);
371 smi_info->watchdog_pretimeouts++;
372 spin_unlock(&smi_info->count_lock);
374 start_clear_flags(smi_info);
375 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
376 spin_unlock(&(smi_info->si_lock));
377 ipmi_smi_watchdog_pretimeout(smi_info->intf);
378 spin_lock(&(smi_info->si_lock));
379 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
380 /* Messages available. */
381 smi_info->curr_msg = ipmi_alloc_smi_msg();
382 if (!smi_info->curr_msg) {
/* Out of memory: fall back to polled mode (see disable_si_irq). */
383 disable_si_irq(smi_info);
384 smi_info->si_state = SI_NORMAL;
387 enable_si_irq(smi_info);
389 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
390 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
391 smi_info->curr_msg->data_size = 2;
393 smi_info->handlers->start_transaction(
395 smi_info->curr_msg->data,
396 smi_info->curr_msg->data_size);
397 smi_info->si_state = SI_GETTING_MESSAGES;
398 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
399 /* Events available. */
400 smi_info->curr_msg = ipmi_alloc_smi_msg();
401 if (!smi_info->curr_msg) {
/* Out of memory: fall back to polled mode here as well. */
402 disable_si_irq(smi_info);
403 smi_info->si_state = SI_NORMAL;
406 enable_si_irq(smi_info);
408 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
409 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
410 smi_info->curr_msg->data_size = 2;
412 smi_info->handlers->start_transaction(
414 smi_info->curr_msg->data,
415 smi_info->curr_msg->data_size);
416 smi_info->si_state = SI_GETTING_EVENTS;
417 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
418 smi_info->oem_data_avail_handler) {
/* Per-OEM hook; a nonzero return means "re-run handle_flags". */
419 if (smi_info->oem_data_avail_handler(smi_info))
422 smi_info->si_state = SI_NORMAL;
/* Dispatch on si_state when the low-level state machine reports a
   completed transaction: collect the result bytes and either deliver
   a message upward, process fetched flags, or advance the multi-step
   clear-flags / enable-interrupts sequences.  Called with si_lock held
   (deliver_recv_msg() drops it temporarily). */
426 static void handle_transaction_done(struct smi_info *smi_info)
428 struct ipmi_smi_msg *msg;
433 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
435 switch (smi_info->si_state) {
437 if (!smi_info->curr_msg)
440 smi_info->curr_msg->rsp_size
441 = smi_info->handlers->get_result(
443 smi_info->curr_msg->rsp,
444 IPMI_MAX_MSG_LENGTH);
446 /* Do this here because deliver_recv_msg() releases the
447 lock, and a new message can be put in during the
448 time the lock is released. */
449 msg = smi_info->curr_msg;
450 smi_info->curr_msg = NULL;
451 deliver_recv_msg(smi_info, msg);
454 case SI_GETTING_FLAGS:
456 unsigned char msg[4];
459 /* We got the flags from the SMI, now handle them. */
460 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
462 /* Error fetching flags, just give up for
463    now and return to the idle state. */
464 smi_info->si_state = SI_NORMAL;
465 } else if (len < 4) {
466 /* Hmm, no flags. That's technically illegal, but
467 don't use uninitialized data. */
468 smi_info->si_state = SI_NORMAL;
/* msg[3] is the flags byte of the Get Message Flags response. */
470 smi_info->msg_flags = msg[3];
471 handle_flags(smi_info);
476 case SI_CLEARING_FLAGS:
477 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
479 unsigned char msg[3];
481 /* We cleared the flags. */
482 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
484 /* Error clearing flags */
486 "ipmi_si: Error clearing flags: %2.2x\n",
/* Chain into the enable-irq sequence if that is what was requested. */
489 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
490 start_enable_irq(smi_info);
492 smi_info->si_state = SI_NORMAL;
496 case SI_GETTING_EVENTS:
498 smi_info->curr_msg->rsp_size
499 = smi_info->handlers->get_result(
501 smi_info->curr_msg->rsp,
502 IPMI_MAX_MSG_LENGTH);
504 /* Do this here because deliver_recv_msg() releases the
505 lock, and a new message can be put in during the
506 time the lock is released. */
507 msg = smi_info->curr_msg;
508 smi_info->curr_msg = NULL;
/* rsp[2] is the IPMI completion code; nonzero means failure. */
509 if (msg->rsp[2] != 0) {
510 /* Error getting event, probably done. */
513 /* Take off the event flag. */
514 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
515 handle_flags(smi_info);
517 spin_lock(&smi_info->count_lock);
519 spin_unlock(&smi_info->count_lock);
521 /* Do this before we deliver the message
522 because delivering the message releases the
523 lock and something else can mess with the
524 state. */
525 handle_flags(smi_info);
527 deliver_recv_msg(smi_info, msg);
532 case SI_GETTING_MESSAGES:
534 smi_info->curr_msg->rsp_size
535 = smi_info->handlers->get_result(
537 smi_info->curr_msg->rsp,
538 IPMI_MAX_MSG_LENGTH);
540 /* Do this here because deliver_recv_msg() releases the
541 lock, and a new message can be put in during the
542 time the lock is released. */
543 msg = smi_info->curr_msg;
544 smi_info->curr_msg = NULL;
/* rsp[2] is the IPMI completion code; nonzero means failure. */
545 if (msg->rsp[2] != 0) {
546 /* Error getting event, probably done. */
549 /* Take off the msg flag. */
550 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
551 handle_flags(smi_info);
553 spin_lock(&smi_info->count_lock);
554 smi_info->incoming_messages++;
555 spin_unlock(&smi_info->count_lock);
557 /* Do this before we deliver the message
558 because delivering the message releases the
559 lock and something else can mess with the
560 state. */
561 handle_flags(smi_info);
563 deliver_recv_msg(smi_info, msg);
/* Step 1 of irq enable: we asked for the current global enables. */
568 case SI_ENABLE_INTERRUPTS1:
570 unsigned char msg[4];
572 /* We got the flags from the SMI, now handle them. */
573 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
576 "ipmi_si: Could not enable interrupts"
577 ", failed get, using polled mode.\n");
578 smi_info->si_state = SI_NORMAL;
580 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
581 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
582 msg[2] = msg[3] | 1; /* enable msg queue int */
583 smi_info->handlers->start_transaction(
584 smi_info->si_sm, msg, 3);
585 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
/* Step 2: check the result of the Set BMC Global Enables command. */
590 case SI_ENABLE_INTERRUPTS2:
592 unsigned char msg[4];
594 /* We got the flags from the SMI, now handle them. */
595 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
598 "ipmi_si: Could not enable interrupts"
599 ", failed set, using polled mode.\n");
601 smi_info->si_state = SI_NORMAL;
607 /* Called on timeouts and events. Timeouts should pass the elapsed
608 time, interrupts should pass in zero.
   Drives the low-level state machine, handles completed/hosed
   transactions, ATTN, and idle work (next message / event fetch).
   Called with si_lock held; returns the final SM result. */
609 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
612 enum si_sm_result si_sm_result;
615 /* There used to be a loop here that waited a little while
616 (around 25us) before giving up. That turned out to be
617 pointless, the minimum delays I was seeing were in the 300us
618 range, which is far too long to wait in an interrupt. So
619 we just run until the state machine tells us something
620 happened or it needs a delay. */
621 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
623 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
625 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
628 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
630 spin_lock(&smi_info->count_lock);
631 smi_info->complete_transactions++;
632 spin_unlock(&smi_info->count_lock);
634 handle_transaction_done(smi_info);
635 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
637 else if (si_sm_result == SI_SM_HOSED)
639 spin_lock(&smi_info->count_lock);
640 smi_info->hosed_count++;
641 spin_unlock(&smi_info->count_lock);
643 /* Do this before return_hosed_msg(), because that
644 releases the lock. */
645 smi_info->si_state = SI_NORMAL;
646 if (smi_info->curr_msg != NULL) {
647 /* If we were handling a user message, format
648 a response to send to the upper layer to
649 tell it about the error. */
650 return_hosed_msg(smi_info);
652 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
655 /* We prefer handling attn over new messages. */
656 if (si_sm_result == SI_SM_ATTN)
658 unsigned char msg[2];
660 spin_lock(&smi_info->count_lock);
661 smi_info->attentions++;
662 spin_unlock(&smi_info->count_lock);
664 /* Got an attn, send down a get message flags to see
665 what's causing it. It would be better to handle
666 this in the upper layer, but due to the way
667 interrupts work with the SMI, that's not really
668 possible. */
669 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
670 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
672 smi_info->handlers->start_transaction(
673 smi_info->si_sm, msg, 2);
674 smi_info->si_state = SI_GETTING_FLAGS;
678 /* If we are currently idle, try to start the next message. */
679 if (si_sm_result == SI_SM_IDLE) {
680 spin_lock(&smi_info->count_lock);
682 spin_unlock(&smi_info->count_lock);
684 si_sm_result = start_next_msg(smi_info);
685 if (si_sm_result != SI_SM_IDLE)
/* Still idle and events were requested: start an event fetch. */
689 if ((si_sm_result == SI_SM_IDLE)
690 && (atomic_read(&smi_info->req_events)))
692 /* We are idle and the upper layer requested that I fetch
693    events, so do so. */
694 atomic_set(&smi_info->req_events, 0);
696 smi_info->curr_msg = ipmi_alloc_smi_msg();
697 if (!smi_info->curr_msg)
700 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
701 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
702 smi_info->curr_msg->data_size = 2;
704 smi_info->handlers->start_transaction(
706 smi_info->curr_msg->data,
707 smi_info->curr_msg->data_size);
708 smi_info->si_state = SI_GETTING_EVENTS;
/* ipmi_smi_handlers.sender: queue an outgoing message.  If the
   interface is shutting down, complete it immediately with an error.
   In run-to-completion mode (e.g. panic context) the transaction is
   driven synchronously; otherwise the message is queued (priority
   selects hp_xmit_msgs vs xmit_msgs) and started if the SM is idle. */
715 static void sender(void *send_info,
716 struct ipmi_smi_msg *msg,
719 struct smi_info *smi_info = send_info;
720 enum si_sm_result result;
726 if (atomic_read(&smi_info->stop_operation)) {
/* Shutting down: synthesize an error response (netfn | 4 = response). */
727 msg->rsp[0] = msg->data[0] | 4;
728 msg->rsp[1] = msg->data[1];
729 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
731 deliver_recv_msg(smi_info, msg);
735 spin_lock_irqsave(&(smi_info->msg_lock), flags);
738 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
741 if (smi_info->run_to_completion) {
742 /* If we are running to completion, then throw it in
743 the list and run transactions until everything is
744 clear. Priority doesn't matter here. */
745 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
747 /* We have to release the msg lock and claim the smi
748 lock in this case, because of race conditions. */
749 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
751 spin_lock_irqsave(&(smi_info->si_lock), flags);
752 result = smi_event_handler(smi_info, 0);
753 while (result != SI_SM_IDLE) {
754 udelay(SI_SHORT_TIMEOUT_USEC);
755 result = smi_event_handler(smi_info,
756 SI_SHORT_TIMEOUT_USEC);
758 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* Normal path: enqueue by priority, then kick the SM if idle. */
762 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
764 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
767 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
769 spin_lock_irqsave(&(smi_info->si_lock), flags);
770 if ((smi_info->si_state == SI_NORMAL)
771 && (smi_info->curr_msg == NULL))
773 start_next_msg(smi_info);
775 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* ipmi_smi_handlers.set_run_to_completion: switch the interface into
   (or out of) synchronous polled operation.  When enabling, drain the
   state machine to idle before returning. */
778 static void set_run_to_completion(void *send_info, int i_run_to_completion)
780 struct smi_info *smi_info = send_info;
781 enum si_sm_result result;
784 spin_lock_irqsave(&(smi_info->si_lock), flags);
786 smi_info->run_to_completion = i_run_to_completion;
787 if (i_run_to_completion) {
788 result = smi_event_handler(smi_info, 0);
789 while (result != SI_SM_IDLE) {
790 udelay(SI_SHORT_TIMEOUT_USEC);
791 result = smi_event_handler(smi_info,
792 SI_SHORT_TIMEOUT_USEC);
796 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* Kernel thread (kipmiN) that polls the state machine for interfaces
   that have no interrupt: busy-loops while the SM wants immediate
   attention, otherwise yields (timeout of 1 jiffy on CALL_WITH_DELAY). */
799 static int ipmi_thread(void *data)
801 struct smi_info *smi_info = data;
803 enum si_sm_result smi_result;
/* Lowest priority; this thread can otherwise eat a whole CPU. */
805 set_user_nice(current, 19);
806 while (!kthread_should_stop()) {
807 spin_lock_irqsave(&(smi_info->si_lock), flags);
808 smi_result = smi_event_handler(smi_info, 0);
809 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
810 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
813 else if (smi_result == SI_SM_CALL_WITH_DELAY)
816 schedule_timeout_interruptible(1);
/* ipmi_smi_handlers.poll: run the state machine once, charging 10us of
   elapsed time so interface timeouts still make progress. */
822 static void poll(void *send_info)
824 struct smi_info *smi_info = send_info;
827 * Make sure there is some delay in the poll loop so we can
828 * drive time forward and timeout things.
831 smi_event_handler(smi_info, 10);
/* ipmi_smi_handlers.request_events: ask that events be fetched the next
   time the state machine goes idle (no-op while shutting down). */
834 static void request_events(void *send_info)
836 struct smi_info *smi_info = send_info;
838 if (atomic_read(&smi_info->stop_operation))
841 atomic_set(&smi_info->req_events, 1);
844 static int initialized = 0;
/* Timer callback that drives the interface when running polled (or as a
   backstop with interrupts).  Charges the real elapsed time (in usec)
   to the state machine, then re-arms itself: SI_TIMEOUT_JIFFIES when
   idle/interrupt-driven, 1 jiffy when the SM asked for a short delay. */
846 static void smi_timeout(unsigned long data)
848 struct smi_info *smi_info = (struct smi_info *) data;
849 enum si_sm_result smi_result;
851 unsigned long jiffies_now;
/* Do not re-arm once shutdown has started. */
857 if (atomic_read(&smi_info->stop_operation))
860 spin_lock_irqsave(&(smi_info->si_lock), flags);
863 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
865 jiffies_now = jiffies;
866 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
867 * SI_USEC_PER_JIFFY)
868 smi_result = smi_event_handler(smi_info, time_diff);
870 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
872 smi_info->last_timeout_jiffies = jiffies_now;
874 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
875 /* Running with interrupts, only do long timeouts. */
876 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
877 spin_lock_irqsave(&smi_info->count_lock, flags);
878 smi_info->long_timeouts++;
879 spin_unlock_irqrestore(&smi_info->count_lock, flags);
883 /* If the state machine asks for a short delay, then shorten
884 the timer timeout. */
885 if (smi_result == SI_SM_CALL_WITH_DELAY) {
886 spin_lock_irqsave(&smi_info->count_lock, flags);
887 smi_info->short_timeouts++;
888 spin_unlock_irqrestore(&smi_info->count_lock, flags);
889 smi_info->si_timer.expires = jiffies + 1;
891 spin_lock_irqsave(&smi_info->count_lock, flags);
892 smi_info->long_timeouts++;
893 spin_unlock_irqrestore(&smi_info->count_lock, flags);
894 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
898 add_timer(&(smi_info->si_timer));
/* Interrupt handler: count the interrupt and run the state machine with
   zero elapsed time (timeouts are charged by smi_timeout instead).
   Skips SM work when the interface is shutting down. */
901 static irqreturn_t si_irq_handler(int irq, void *data)
903 struct smi_info *smi_info = data;
909 spin_lock_irqsave(&(smi_info->si_lock), flags);
911 spin_lock(&smi_info->count_lock);
912 smi_info->interrupts++;
913 spin_unlock(&smi_info->count_lock);
915 if (atomic_read(&smi_info->stop_operation))
920 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
922 smi_event_handler(smi_info, 0);
924 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* BT-specific interrupt handler: the BT interface requires the IRQ flag
   to be acknowledged in its interrupt-mask register before the common
   handler runs. */
928 static irqreturn_t si_bt_irq_handler(int irq, void *data)
930 struct smi_info *smi_info = data;
931 /* We need to clear the IRQ flag for the BT interface. */
932 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
933 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
934 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
935 return si_irq_handler(irq, data);
/* ipmi_smi_handlers.start_processing: bind the upper-layer interface,
   start the driving timer, and optionally spawn the kipmiN polling
   thread (forced by the force_kipmid module parameter, or defaulted on
   for non-BT interfaces without an IRQ).  Thread-start failure is not
   fatal; the timer alone then drives the interface. */
938 static int smi_start_processing(void *send_info,
941 struct smi_info *new_smi = send_info;
944 new_smi->intf = intf;
946 /* Set up the timer that drives the interface. */
947 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
948 new_smi->last_timeout_jiffies = jiffies;
949 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
952 * Check if the user forcefully enabled the daemon.
954 if (new_smi->intf_num < num_force_kipmid)
955 enable = force_kipmid[new_smi->intf_num];
957 * The BT interface is efficient enough to not need a thread,
958 * and there is no need for a thread if we have interrupts.
960 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
964 new_smi->thread = kthread_run(ipmi_thread, new_smi,
965 "kipmi%d", new_smi->intf_num);
966 if (IS_ERR(new_smi->thread)) {
967 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
968 " kernel thread due to error %ld, only using"
969 " timers to drive the interface\n",
970 PTR_ERR(new_smi->thread));
971 new_smi->thread = NULL;
/* ipmi_smi_handlers.set_maintenance_mode: clear any pending event
   request.  NOTE(review): only the !enable path's effect is visible
   here — confirm full behavior against the unabridged source. */
978 static void set_maintenance_mode(void *send_info, int enable)
980 struct smi_info *smi_info = send_info;
983 atomic_set(&smi_info->req_events, 0);
/* The operations this driver exposes to the IPMI message handler layer
   (registered for each detected system interface). */
986 static struct ipmi_smi_handlers handlers =
988 .owner = THIS_MODULE,
989 .start_processing = smi_start_processing,
991 .request_events = request_events,
992 .set_maintenance_mode = set_maintenance_mode,
993 .set_run_to_completion = set_run_to_completion,
997 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
998 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
1000 static LIST_HEAD(smi_infos);
1001 static DEFINE_MUTEX(smi_infos_lock);
1002 static int smi_num; /* Used to sequence the SMIs */
1004 #define DEFAULT_REGSPACING 1
1006 static int si_trydefaults = 1;
1007 static char *si_type[SI_MAX_PARMS];
1008 #define MAX_SI_TYPE_STR 30
1009 static char si_type_str[MAX_SI_TYPE_STR];
1010 static unsigned long addrs[SI_MAX_PARMS];
1011 static int num_addrs;
1012 static unsigned int ports[SI_MAX_PARMS];
1013 static int num_ports;
1014 static int irqs[SI_MAX_PARMS];
1015 static int num_irqs;
1016 static int regspacings[SI_MAX_PARMS];
1017 static int num_regspacings = 0;
1018 static int regsizes[SI_MAX_PARMS];
1019 static int num_regsizes = 0;
1020 static int regshifts[SI_MAX_PARMS];
1021 static int num_regshifts = 0;
1022 static int slave_addrs[SI_MAX_PARMS];
1023 static int num_slave_addrs = 0;
1025 #define IPMI_IO_ADDR_SPACE 0
1026 #define IPMI_MEM_ADDR_SPACE 1
1027 static char *addr_space_to_str[] = { "I/O", "mem" };
1029 static int hotmod_handler(const char *val, struct kernel_param *kp);
1031 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1032 MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1033 " Documentation/IPMI.txt in the kernel sources for the"
1036 module_param_named(trydefaults, si_trydefaults, bool, 0);
1037 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1038 " default scan of the KCS and SMIC interface at the standard"
1040 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1041 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1042 " interface separated by commas. The types are 'kcs',"
1043 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1044 " the first interface to kcs and the second to bt");
1045 module_param_array(addrs, long, &num_addrs, 0);
1046 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1047 " addresses separated by commas. Only use if an interface"
1048 " is in memory. Otherwise, set it to zero or leave"
1050 module_param_array(ports, int, &num_ports, 0);
1051 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1052 " addresses separated by commas. Only use if an interface"
1053 " is a port. Otherwise, set it to zero or leave"
1055 module_param_array(irqs, int, &num_irqs, 0);
1056 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1057 " addresses separated by commas. Only use if an interface"
1058 " has an interrupt. Otherwise, set it to zero or leave"
1060 module_param_array(regspacings, int, &num_regspacings, 0);
1061 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1062 " and each successive register used by the interface. For"
1063 " instance, if the start address is 0xca2 and the spacing"
1064 " is 2, then the second address is at 0xca4. Defaults"
1066 module_param_array(regsizes, int, &num_regsizes, 0);
1067 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1068 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1069 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1070 " the 8-bit IPMI register has to be read from a larger"
1072 module_param_array(regshifts, int, &num_regshifts, 0);
1073 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1074 " IPMI register, in bits. For instance, if the data"
1075 " is read from a 32-bit word and the IPMI data is in"
1076 " bit 8-15, then the shift would be 8");
1077 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1078 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1079 " the controller. Normally this is 0x20, but can be"
1080 " overridden by this parm. This is an array indexed"
1081 " by interface number.");
1082 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1083 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1084 " disabled(0). Normally the IPMI driver auto-detects"
1085 " this, but the value may be overridden by this parm.");
1086 module_param(unload_when_empty, int, 0);
1087 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1088 " specified or found, default is 1. Setting to 0"
1089 " is useful for hot add of devices using hotmod.");
/* Release the IRQ claimed by std_irq_setup(); for BT, first mask
   interrupts at the interface so no more are generated. */
1092 static void std_irq_cleanup(struct smi_info *info)
1094 if (info->si_type == SI_BT)
1095 /* Disable the interrupt in the BT interface. */
1096 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1097 free_irq(info->irq, info);
/* Claim the interface's IRQ, using the BT-specific handler (which acks
   the BT interrupt-mask register) for BT interfaces and the common
   handler otherwise.  On failure the interface falls back to polling;
   on success irq_cleanup is pointed at std_irq_cleanup(). */
1100 static int std_irq_setup(struct smi_info *info)
1107 if (info->si_type == SI_BT) {
1108 rv = request_irq(info->irq,
1114 /* Enable the interrupt in the BT interface. */
1115 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1116 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1118 rv = request_irq(info->irq,
1125 "ipmi_si: %s unable to claim interrupt %d,"
1126 " running polled\n",
1127 DEVICE_NAME, info->irq);
1130 info->irq_cleanup = std_irq_cleanup;
1131 printk(" Using irq %d\n", info->irq);
/* Port-I/O register accessors for 1-, 2- and 4-byte-wide register
   windows.  For widths > 1 the IPMI byte is extracted/placed at
   io->regshift within the wider word; register N lives at
   addr + N * io->regspacing. */
1137 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1139 unsigned int addr = io->addr_data;
1141 return inb(addr + (offset * io->regspacing));
/* 16-bit window variant. */
1144 static void port_outb(struct si_sm_io *io, unsigned int offset,
1147 unsigned int addr = io->addr_data;
1149 outb(b, addr + (offset * io->regspacing));
1152 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1154 unsigned int addr = io->addr_data;
1156 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1159 static void port_outw(struct si_sm_io *io, unsigned int offset,
1162 unsigned int addr = io->addr_data;
1164 outw(b << io->regshift, addr + (offset * io->regspacing));
/* 32-bit window variant. */
1167 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1169 unsigned int addr = io->addr_data;
1171 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1174 static void port_outl(struct si_sm_io *io, unsigned int offset,
1177 unsigned int addr = io->addr_data;
1179 outl(b << io->regshift, addr+(offset * io->regspacing));
/* Release the per-register I/O regions claimed by port_setup() (one
   region per register, mirroring the disjoint-region allocation). */
1182 static void port_cleanup(struct smi_info *info)
1184 unsigned int addr = info->io.addr_data;
1188 for (idx = 0; idx < info->io_size; idx++) {
1189 release_region(addr + idx * info->io.regspacing,
/* Set up port-I/O access: pick inb/inw/inl accessors by register size,
   then claim each register's I/O region individually (some BIOSes
   reserve disjoint regions, so a single request_region over the whole
   range can fail).  Undoes partial claims on failure. */
1195 static int port_setup(struct smi_info *info)
1197 unsigned int addr = info->io.addr_data;
1203 info->io_cleanup = port_cleanup;
1205 /* Figure out the actual inb/inw/inl/etc routine to use based
1206 upon the register size. */
1207 switch (info->io.regsize) {
1209 info->io.inputb = port_inb;
1210 info->io.outputb = port_outb;
1213 info->io.inputb = port_inw;
1214 info->io.outputb = port_outw;
1217 info->io.inputb = port_inl;
1218 info->io.outputb = port_outl;
1221 printk("ipmi_si: Invalid register size: %d\n",
1226 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1227 * tables. This causes problems when trying to register the
1228 * entire I/O region. Therefore we must register each I/O
1229 * region separately.
1231 for (idx = 0; idx < info->io_size; idx++) {
1232 if (request_region(addr + idx * info->io.regspacing,
1233 info->io.regsize, DEVICE_NAME) == NULL) {
1234 /* Undo allocations */
1236 release_region(addr + idx * info->io.regspacing,
/* Read one byte from a memory-mapped BMC register (io->addr was set
 * up by ioremap in mem_setup). */
1245 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1247 return readb((io->addr)+(offset * io->regspacing));
/* Write one byte to a memory-mapped BMC register. */
1250 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1253 writeb(b, (io->addr)+(offset * io->regspacing));
/* 16-bit memory-mapped read; extract the register byte via regshift. */
1256 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1258 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
/* 16-bit memory-mapped write.
 * NOTE(review): this 16-bit variant issues writeb(), not writew() --
 * looks like a copy/paste slip from intf_mem_outb, but some hardware
 * may depend on byte-sized writes here; confirm before changing. */
1262 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1265 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* 32-bit memory-mapped read variant. */
1268 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1270 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
/* 32-bit memory-mapped write variant. */
1274 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1277 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* 64-bit memory-mapped read; readq/writeq only exist on platforms
 * that provide them, so this pair is presumably conditionally
 * compiled -- the guards are not visible in this listing. */
1281 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1283 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
/* 64-bit memory-mapped write counterpart of mem_inq. */
1287 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1290 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* Unmap the registers and release the memory region claimed by
 * mem_setup(); mapsize must mirror the setup-time calculation exactly
 * or the wrong region is released. */
1294 static void mem_cleanup(struct smi_info *info)
1296 unsigned long addr = info->io.addr_data;
1299 if (info->io.addr) {
1300 iounmap(info->io.addr);
1302 mapsize = ((info->io_size * info->io.regspacing)
1303 - (info->io.regspacing - info->io.regsize));
1305 release_mem_region(addr, mapsize);
/* Claim and ioremap the memory-mapped register window and select the
 * readb/readw/... access routines matching io.regsize.  Returns 0 on
 * success; on ioremap failure the region is released again. */
1309 static int mem_setup(struct smi_info *info)
1311 unsigned long addr = info->io.addr_data;
1317 info->io_cleanup = mem_cleanup;
1319 /* Figure out the actual readb/readw/readl/etc routine to use based
1320 upon the register size. */
1321 switch (info->io.regsize) {
1323 info->io.inputb = intf_mem_inb;
1324 info->io.outputb = intf_mem_outb;
1327 info->io.inputb = intf_mem_inw;
1328 info->io.outputb = intf_mem_outw;
1331 info->io.inputb = intf_mem_inl;
1332 info->io.outputb = intf_mem_outl;
1336 info->io.inputb = mem_inq;
1337 info->io.outputb = mem_outq;
1341 printk("ipmi_si: Invalid register size: %d\n",
1346 /* Calculate the total amount of memory to claim. This is an
1347 * unusual looking calculation, but it avoids claiming any
1348 * more memory than it has to. It will claim everything
1349 * between the first address to the end of the last full
1351 mapsize = ((info->io_size * info->io.regspacing)
1352 - (info->io.regspacing - info->io.regsize));
1354 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1357 info->io.addr = ioremap(addr, mapsize);
1358 if (info->io.addr == NULL) {
1359 release_mem_region(addr, mapsize);
1366 * Parms come in as <op1>[:op2[:op3...]]. ops are:
1367 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
/* Lookup tables mapping hotmod parameter tokens to their values.
 * Each table is evidently terminated by a sentinel entry with a NULL
 * name (the sentinels are not visible in this listing) -- parse_str
 * relies on that terminator. */
1375 enum hotmod_op { HM_ADD, HM_REMOVE };
1376 struct hotmod_vals {
1380 static struct hotmod_vals hotmod_ops[] = {
1382 { "remove", HM_REMOVE },
1385 static struct hotmod_vals hotmod_si[] = {
1387 { "smic", SI_SMIC },
1391 static struct hotmod_vals hotmod_as[] = {
1392 { "mem", IPMI_MEM_ADDR_SPACE },
1393 { "i/o", IPMI_IO_ADDR_SPACE },
/* Local case-insensitive string comparison used by the hotmod and
 * hardcoded-parameter parsers; returns 0 on a match, strcmp-style. */
1396 static int ipmi_strcasecmp(const char *s1, const char *s2)
1398 while (*s1 || *s2) {
/*
 * parse_str - match the ','-terminated token at *curr against table v.
 *
 * On success the matched value is stored through val, *curr is
 * advanced past the ',', and 0 is returned; otherwise a warning is
 * logged and an error is returned.
 *
 * Fix: the search loop previously terminated on hotmod_ops[i].name no
 * matter which table was passed in, so lookups in the longer
 * hotmod_si/hotmod_as tables stopped after only as many entries as
 * hotmod_ops has (e.g. "bt" could never match).  Terminate on
 * v[i].name, the NULL-name sentinel of the table actually searched.
 */
1410 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1415 s = strchr(*curr, ',');
1417 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1422 for (i = 0; v[i].name; i++) {
1423 if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
1430 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
/* Module-parameter handler for runtime add/remove of interfaces
 * ("hotmod").  Parses a colon-separated list of
 * op,si-type,addr-space,address[,opt=val...] specs; for HM_ADD it
 * builds an smi_info and registers it, for HM_REMOVE it scans
 * smi_infos for a matching interface (the removal call itself is
 * elided in this listing). */
1434 static int hotmod_handler(const char *val, struct kernel_param *kp)
1436 char *str = kstrdup(val, GFP_KERNEL);
1438 char *next, *curr, *s, *n, *o;
1440 enum si_type si_type;
1449 struct smi_info *info;
1454 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1455 ival = strlen(str) - 1;
1456 while ((ival >= 0) && isspace(str[ival])) {
1461 for (curr = str; curr; curr = next) {
1468 next = strchr(curr, ':');
1474 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1479 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1484 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1488 s = strchr(curr, ',');
1493 addr = simple_strtoul(curr, &n, 0);
1494 if ((*n != '\0') || (*curr == '\0')) {
1495 printk(KERN_WARNING PFX "Invalid hotmod address"
1502 s = strchr(curr, ',');
1507 o = strchr(curr, '=');
/* Each optional "name=value" token is matched against the known
 * integer options below; the macro validates and parses the value. */
1512 #define HOTMOD_INT_OPT(name, val) \
1513 if (ipmi_strcasecmp(curr, name) == 0) { \
1515 printk(KERN_WARNING PFX \
1516 "No option given for '%s'\n", \
1520 val = simple_strtoul(o, &n, 0); \
1521 if ((*n != '\0') || (*o == '\0')) { \
1522 printk(KERN_WARNING PFX \
1523 "Bad option given for '%s'\n", \
1529 HOTMOD_INT_OPT("rsp", regspacing)
1530 else HOTMOD_INT_OPT("rsi", regsize)
1531 else HOTMOD_INT_OPT("rsh", regshift)
1532 else HOTMOD_INT_OPT("irq", irq)
1533 else HOTMOD_INT_OPT("ipmb", ipmb)
1535 printk(KERN_WARNING PFX
1536 "Invalid hotmod option '%s'\n",
1540 #undef HOTMOD_INT_OPT
/* HM_ADD path: populate a fresh smi_info from the parsed values. */
1544 info = kzalloc(sizeof(*info), GFP_KERNEL);
1550 info->addr_source = "hotmod";
1551 info->si_type = si_type;
1552 info->io.addr_data = addr;
1553 info->io.addr_type = addr_space;
1554 if (addr_space == IPMI_MEM_ADDR_SPACE)
1555 info->io_setup = mem_setup;
1557 info->io_setup = port_setup;
1559 info->io.addr = NULL;
1560 info->io.regspacing = regspacing;
1561 if (!info->io.regspacing)
1562 info->io.regspacing = DEFAULT_REGSPACING;
1563 info->io.regsize = regsize;
1564 if (!info->io.regsize)
/* NOTE(review): regsize defaults to DEFAULT_REGSPACING here, matching
 * the hardcoded path below -- presumably intentional reuse. */
1565 info->io.regsize = DEFAULT_REGSPACING;
1566 info->io.regshift = regshift;
1569 info->irq_setup = std_irq_setup;
1570 info->slave_addr = ipmb;
/* HM_REMOVE path: find the matching registered interface. */
1575 struct smi_info *e, *tmp_e;
1577 mutex_lock(&smi_infos_lock);
1578 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1579 if (e->io.addr_type != addr_space)
1581 if (e->si_type != si_type)
1583 if (e->io.addr_data == addr)
1586 mutex_unlock(&smi_infos_lock);
/* Register interfaces described by the module's hardcoded parameter
 * arrays (ports[], addrs[], si_type[], ...), one smi_info per slot
 * that has a non-zero port or address. */
1594 static __devinit void hardcode_find_bmc(void)
1597 struct smi_info *info;
1599 for (i = 0; i < SI_MAX_PARMS; i++) {
1600 if (!ports[i] && !addrs[i])
1603 info = kzalloc(sizeof(*info), GFP_KERNEL);
1607 info->addr_source = "hardcoded";
/* Interface type string defaults to KCS when unspecified. */
1609 if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
1610 info->si_type = SI_KCS;
1611 } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
1612 info->si_type = SI_SMIC;
1613 } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
1614 info->si_type = SI_BT;
1617 "ipmi_si: Interface type specified "
1618 "for interface %d, was invalid: %s\n",
/* Port takes precedence over memory address when both are given. */
1626 info->io_setup = port_setup;
1627 info->io.addr_data = ports[i];
1628 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1629 } else if (addrs[i]) {
1631 info->io_setup = mem_setup;
1632 info->io.addr_data = addrs[i];
1633 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1636 "ipmi_si: Interface type specified "
1637 "for interface %d, "
1638 "but port and address were not set or "
1639 "set to zero.\n", i);
1644 info->io.addr = NULL;
1645 info->io.regspacing = regspacings[i];
1646 if (!info->io.regspacing)
1647 info->io.regspacing = DEFAULT_REGSPACING;
1648 info->io.regsize = regsizes[i];
1649 if (!info->io.regsize)
1650 info->io.regsize = DEFAULT_REGSPACING;
1651 info->io.regshift = regshifts[i];
1652 info->irq = irqs[i];
1654 info->irq_setup = std_irq_setup;
1662 #include <linux/acpi.h>
1664 /* Once we get an ACPI failure, we don't try any more, because we go
1665 through the tables sequentially. Once we don't find a table, there
1667 static int acpi_failure = 0;
1669 /* For GPE-type interrupts. */
/* ACPI GPE handler: bumps the interrupt statistics and kicks the SMI
 * state machine, mirroring the standard IRQ handler.  Does nothing
 * once stop_operation is set (shutdown in progress). */
1670 static u32 ipmi_acpi_gpe(void *context)
1672 struct smi_info *smi_info = context;
1673 unsigned long flags;
1678 spin_lock_irqsave(&(smi_info->si_lock), flags);
1680 spin_lock(&smi_info->count_lock);
1681 smi_info->interrupts++;
1682 spin_unlock(&smi_info->count_lock);
1684 if (atomic_read(&smi_info->stop_operation))
/* Debug-only timestamp trace (conditional compilation elided here).
 * NOTE(review): "%d" with tv_sec (a long) is dubious on 64-bit. */
1688 do_gettimeofday(&t);
1689 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1691 smi_event_handler(smi_info, 0);
1693 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1695 return ACPI_INTERRUPT_HANDLED;
/* Undo acpi_gpe_irq_setup(): detach our GPE handler. */
1698 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1703 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
/* Install ipmi_acpi_gpe as the handler for the interface's GPE.
 * On failure the interface falls back to polled operation. */
1706 static int acpi_gpe_irq_setup(struct smi_info *info)
1713 /* FIXME - is level triggered right? */
1714 status = acpi_install_gpe_handler(NULL,
1716 ACPI_GPE_LEVEL_TRIGGERED,
1719 if (status != AE_OK) {
1721 "ipmi_si: %s unable to claim ACPI GPE %d,"
1722 " running polled\n",
1723 DEVICE_NAME, info->irq);
1727 info->irq_cleanup = acpi_gpe_irq_cleanup;
1728 printk(" Using ACPI GPE %d\n", info->irq);
1735 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1746 s8 CreatorRevision[4];
1749 s16 SpecificationRevision;
1752 * Bit 0 - SCI interrupt supported
1753 * Bit 1 - I/O APIC/SAPIC
1757 /* If bit 0 of InterruptType is set, then this is the SCI
1758 interrupt in the GPEx_STS register. */
1763 /* If bit 1 of InterruptType is set, then this is the I/O
1764 APIC/SAPIC interrupt. */
1765 u32 GlobalSystemInterrupt;
1767 /* The actual register address. */
1768 struct acpi_generic_address addr;
1772 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
/*
 * try_init_acpi - build an smi_info from an ACPI SPMI table entry.
 * Rejects non-legacy tables and unknown interface/address types.
 *
 * Fix: the io.addr_type assignments were swapped relative to the
 * io_setup routines -- the SYSTEM_MEMORY branch selected mem_setup
 * but tagged the address as IPMI_IO_ADDR_SPACE, and the SYSTEM_IO
 * branch selected port_setup but tagged it IPMI_MEM_ADDR_SPACE.
 * Every other discovery path (SMBIOS, PCI, hotmod) pairs mem_setup
 * with IPMI_MEM_ADDR_SPACE and port_setup with IPMI_IO_ADDR_SPACE;
 * do the same here so duplicate detection and /proc reporting work.
 */
1775 static __devinit int try_init_acpi(struct SPMITable *spmi)
1777 struct smi_info *info;
/* Only legacy (IPMIlegacy == 1) SPMI descriptions are supported. */
1781 if (spmi->IPMIlegacy != 1) {
1782 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1786 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1787 addr_space = IPMI_MEM_ADDR_SPACE;
1789 addr_space = IPMI_IO_ADDR_SPACE;
1791 info = kzalloc(sizeof(*info), GFP_KERNEL);
1793 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1797 info->addr_source = "ACPI";
1799 /* Figure out the interface type. */
1800 switch (spmi->InterfaceType)
1803 info->si_type = SI_KCS;
1806 info->si_type = SI_SMIC;
1809 info->si_type = SI_BT;
1812 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1813 spmi->InterfaceType);
1818 if (spmi->InterruptType & 1) {
1819 /* We've got a GPE interrupt. */
1820 info->irq = spmi->GPE;
1821 info->irq_setup = acpi_gpe_irq_setup;
1822 } else if (spmi->InterruptType & 2) {
1823 /* We've got an APIC/SAPIC interrupt. */
1824 info->irq = spmi->GlobalSystemInterrupt;
1825 info->irq_setup = std_irq_setup;
1827 /* Use the default interrupt setting. */
1829 info->irq_setup = NULL;
1832 if (spmi->addr.register_bit_width) {
1833 /* A (hopefully) properly formed register bit width. */
1834 info->io.regspacing = spmi->addr.register_bit_width / 8;
1836 info->io.regspacing = DEFAULT_REGSPACING;
1838 info->io.regsize = info->io.regspacing;
1839 info->io.regshift = spmi->addr.register_bit_offset;
1841 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1843 info->io_setup = mem_setup;
1844 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1845 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1847 info->io_setup = port_setup;
1848 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1851 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1854 info->io.addr_data = spmi->addr.address;
/* Walk every ACPI "SPMI" table instance in order, initializing an
 * interface for each; stops at the first table-fetch failure. */
1861 static __devinit void acpi_find_bmc(void)
1864 struct SPMITable *spmi;
1873 for (i = 0; ; i++) {
1874 status = acpi_get_firmware_table("SPMI", i+1,
1875 ACPI_LOGICAL_ADDRESSING,
1876 (struct acpi_table_header **)
1878 if (status != AE_OK)
1881 try_init_acpi(spmi);
/* Decoded form of an SMBIOS/DMI type-38 (IPMI device) record. */
1887 struct dmi_ipmi_data
1891 unsigned long base_addr;
/* Decode a raw SMBIOS IPMI device record into dmi_ipmi_data.
 * Byte offsets follow the SMBIOS type-38 layout; longer records carry
 * register-spacing and address-space details, short ones are treated
 * as plain I/O-port records. */
1897 static int __devinit decode_dmi(struct dmi_header *dm,
1898 struct dmi_ipmi_data *dmi)
1900 u8 *data = (u8 *)dm;
1901 unsigned long base_addr;
1903 u8 len = dm->length;
1905 dmi->type = data[4];
1907 memcpy(&base_addr, data+8, sizeof(unsigned long));
/* Low bit of the base address selects I/O (1) vs. memory (0) space. */
1909 if (base_addr & 1) {
1911 base_addr &= 0xFFFE;
1912 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1916 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1918 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1920 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1922 dmi->irq = data[0x11];
1924 /* The top two bits of byte 0x10 hold the register spacing. */
1925 reg_spacing = (data[0x10] & 0xC0) >> 6;
1926 switch(reg_spacing){
1927 case 0x00: /* Byte boundaries */
1930 case 0x01: /* 32-bit boundaries */
1933 case 0x02: /* 16-byte boundaries */
1937 /* Some other interface, just ignore it. */
/* Short-record fallback path: */
1942 /* Note that technically, the lower bit of the base
1943 * address should be 1 if the address is I/O and 0 if
1944 * the address is in memory. So many systems get that
1945 * wrong (and all that I have seen are I/O) so we just
1946 * ignore that bit and assume I/O. Systems that use
1947 * memory should use the newer spec, anyway. */
1948 dmi->base_addr = base_addr & 0xfffe;
1949 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1953 dmi->slave_addr = data[6];
/* Build an smi_info from a decoded SMBIOS record and register it. */
1958 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1960 struct smi_info *info;
1962 info = kzalloc(sizeof(*info), GFP_KERNEL);
1965 "ipmi_si: Could not allocate SI data\n");
1969 info->addr_source = "SMBIOS";
/* SMBIOS interface-type codes map onto our si_type enum. */
1971 switch (ipmi_data->type) {
1972 case 0x01: /* KCS */
1973 info->si_type = SI_KCS;
1975 case 0x02: /* SMIC */
1976 info->si_type = SI_SMIC;
1979 info->si_type = SI_BT;
1985 switch (ipmi_data->addr_space) {
1986 case IPMI_MEM_ADDR_SPACE:
1987 info->io_setup = mem_setup;
1988 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1991 case IPMI_IO_ADDR_SPACE:
1992 info->io_setup = port_setup;
1993 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1999 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2000 ipmi_data->addr_space);
2003 info->io.addr_data = ipmi_data->base_addr;
/* The decoded DMI register spacing rides in the "offset" field. */
2005 info->io.regspacing = ipmi_data->offset;
2006 if (!info->io.regspacing)
2007 info->io.regspacing = DEFAULT_REGSPACING;
2008 info->io.regsize = DEFAULT_REGSPACING;
2009 info->io.regshift = 0;
2011 info->slave_addr = ipmi_data->slave_addr;
2013 info->irq = ipmi_data->irq;
2015 info->irq_setup = std_irq_setup;
/* Scan the DMI device list for IPMI entries and initialize each one
 * that decodes successfully. */
2020 static void __devinit dmi_find_bmc(void)
2022 struct dmi_device *dev = NULL;
2023 struct dmi_ipmi_data data;
2026 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2027 memset(&data, 0, sizeof(data));
2028 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
2030 try_init_dmi(&data);
2033 #endif /* CONFIG_DMI */
/* PCI class code for IPMI (base 0x0C, sub 0x07); the low byte of the
 * programming interface distinguishes SMIC/KCS/BT.  The HP MMC device
 * is matched explicitly by vendor/device ID. */
2037 #define PCI_ERMC_CLASSCODE 0x0C0700
2038 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2039 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2040 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2041 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2042 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
2044 #define PCI_HP_VENDOR_ID 0x103C
2045 #define PCI_MMC_DEVICE_ID 0x121A
2046 #define PCI_MMC_ADDR_CW 0x10
/* addr_source_cleanup hook for PCI-discovered interfaces: disable the
 * device that ipmi_pci_probe() enabled. */
2048 static void ipmi_pci_cleanup(struct smi_info *info)
2050 struct pci_dev *pdev = info->addr_source_data;
2052 pci_disable_device(pdev);
/* PCI probe: derive the SI type from the class code's programming
 * interface, enable the device, and describe BAR 0 as either port or
 * memory mapped registers before handing off to try_smi_init(). */
2055 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2056 const struct pci_device_id *ent)
2059 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2060 struct smi_info *info;
2061 int first_reg_offset = 0;
2063 info = kzalloc(sizeof(*info), GFP_KERNEL);
2067 info->addr_source = "PCI";
2069 switch (class_type) {
2070 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2071 info->si_type = SI_SMIC;
2074 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2075 info->si_type = SI_KCS;
2078 case PCI_ERMC_CLASSCODE_TYPE_BT:
2079 info->si_type = SI_BT;
2084 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2085 pci_name(pdev), class_type);
2089 rv = pci_enable_device(pdev);
2091 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2097 info->addr_source_cleanup = ipmi_pci_cleanup;
2098 info->addr_source_data = pdev;
/* HP hardware starts its registers one slot in. */
2100 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2101 first_reg_offset = 1;
2103 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2104 info->io_setup = port_setup;
2105 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2107 info->io_setup = mem_setup;
2108 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2110 info->io.addr_data = pci_resource_start(pdev, 0);
2112 info->io.regspacing = DEFAULT_REGSPACING;
2113 info->io.regsize = DEFAULT_REGSPACING;
2114 info->io.regshift = 0;
2116 info->irq = pdev->irq;
2118 info->irq_setup = std_irq_setup;
2120 info->dev = &pdev->dev;
2122 return try_smi_init(info);
/* Remove/suspend/resume hooks (bodies elided in this listing;
 * presumably minimal/no-op -- verify against the full source). */
2125 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2130 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2135 static int ipmi_pci_resume(struct pci_dev *pdev)
/* Match the HP MMC device explicitly plus anything with the IPMI
 * class code; driver structure wires in the probe/remove/PM hooks. */
2141 static struct pci_device_id ipmi_pci_devices[] = {
2142 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2143 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }
2145 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2147 static struct pci_driver ipmi_pci_driver = {
2148 .name = DEVICE_NAME,
2149 .id_table = ipmi_pci_devices,
2150 .probe = ipmi_pci_probe,
2151 .remove = __devexit_p(ipmi_pci_remove),
2153 .suspend = ipmi_pci_suspend,
2154 .resume = ipmi_pci_resume,
2157 #endif /* CONFIG_PCI */
/* Issue a Get Device ID command synchronously by hand-cranking the SM
 * state machine (interrupts/timers are not running yet).  Used as the
 * "is there really a BMC here?" probe; on success the parsed device
 * id is stored in smi_info->device_id. */
2160 static int try_get_dev_id(struct smi_info *smi_info)
2162 unsigned char msg[2];
2163 unsigned char *resp;
2164 unsigned long resp_len;
2165 enum si_sm_result smi_result;
2168 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2172 /* Do a Get Device ID command, since it comes back with some
2174 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2175 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2176 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2178 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
/* Poll the state machine to completion, sleeping a tick when asked. */
2181 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2182 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2183 schedule_timeout_uninterruptible(1);
2184 smi_result = smi_info->handlers->event(
2185 smi_info->si_sm, 100);
2187 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2189 smi_result = smi_info->handlers->event(
2190 smi_info->si_sm, 0);
2195 if (smi_result == SI_SM_HOSED) {
2196 /* We couldn't get the state machine to run, so whatever's at
2197 the port is probably not an IPMI SMI interface. */
2202 /* Otherwise, we got some data. */
2203 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2204 resp, IPMI_MAX_MSG_LENGTH)
2205 if (resp_len < 14) {
2206 /* That's odd, it should be longer. */
2211 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
2212 /* That's odd, it shouldn't be able to fail. */
2217 /* Record info from the get device id, in case we need it. */
2218 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
/* /proc "type" reader: prints the interface type name. */
2225 static int type_file_read_proc(char *page, char **start, off_t off,
2226 int count, int *eof, void *data)
2228 struct smi_info *smi = data;
2230 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
/* /proc "si_stats" reader: dumps the per-interface counters. */
2233 static int stat_file_read_proc(char *page, char **start, off_t off,
2234 int count, int *eof, void *data)
2236 char *out = (char *) page;
2237 struct smi_info *smi = data;
2239 out += sprintf(out, "interrupts_enabled: %d\n",
2240 smi->irq && !smi->interrupt_disabled);
2241 out += sprintf(out, "short_timeouts: %ld\n",
2242 smi->short_timeouts);
2243 out += sprintf(out, "long_timeouts: %ld\n",
2244 smi->long_timeouts);
2245 out += sprintf(out, "timeout_restarts: %ld\n",
2246 smi->timeout_restarts);
2247 out += sprintf(out, "idles: %ld\n",
2249 out += sprintf(out, "interrupts: %ld\n",
2251 out += sprintf(out, "attentions: %ld\n",
2253 out += sprintf(out, "flag_fetches: %ld\n",
2255 out += sprintf(out, "hosed_count: %ld\n",
2257 out += sprintf(out, "complete_transactions: %ld\n",
2258 smi->complete_transactions);
2259 out += sprintf(out, "events: %ld\n",
2261 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
2262 smi->watchdog_pretimeouts);
2263 out += sprintf(out, "incoming_messages: %ld\n",
2264 smi->incoming_messages);
/* /proc "params" reader: emits the interface description in the same
 * format the hotmod parameter parser accepts. */
2269 static int param_read_proc(char *page, char **start, off_t off,
2270 int count, int *eof, void *data)
2272 struct smi_info *smi = data;
2274 return sprintf(page,
2275 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2276 si_to_str[smi->si_type],
2277 addr_space_to_str[smi->io.addr_type],
2287 * oem_data_avail_to_receive_msg_avail
2288 * @info - smi_info structure with msg_flags set
2290 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2291 * Returns 1 indicating need to re-run handle_flags().
2293 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2295 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2301 * setup_dell_poweredge_oem_data_handler
2302 * @info - smi_info.device_id must be populated
2304 * Systems that match, but have firmware version < 1.40 may assert
2305 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2306 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2307 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2308 * as RECEIVE_MSG_AVAIL instead.
2310 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2311 * assert the OEM[012] bits, and if it did, the driver would have to
2312 * change to handle that properly, we don't actually check for the
2314 * Device ID = 0x20 BMC on PowerEdge 8G servers
2315 * Device Revision = 0x80
2316 * Firmware Revision1 = 0x01 BMC version 1.40
2317 * Firmware Revision2 = 0x40 BCD encoded
2318 * IPMI Version = 0x51 IPMI 1.5
2319 * Manufacturer ID = A2 02 00 Dell IANA
2321 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2322 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
2325 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2326 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2327 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2328 #define DELL_IANA_MFR_ID 0x0002a2
/* Install the OEM-flag workaround on matching Dell BMCs. */
2329 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2331 struct ipmi_device_id *id = &smi_info->device_id;
2332 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2333 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2334 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2335 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2336 smi_info->oem_data_avail_handler =
2337 oem_data_avail_to_receive_msg_avail;
2339 else if (ipmi_version_major(id) < 1 ||
2340 (ipmi_version_major(id) == 1 &&
2341 ipmi_version_minor(id) < 5)) {
2342 smi_info->oem_data_avail_handler =
2343 oem_data_avail_to_receive_msg_avail;
/* Fabricate a 0xCA (cannot return requested length) error response for
 * the current message and deliver it to the upper layer. */
2348 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2349 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2351 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2353 /* Make it a response */
2354 msg->rsp[0] = msg->data[0] | 4;
2355 msg->rsp[1] = msg->data[1];
2356 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2358 smi_info->curr_msg = NULL;
2359 deliver_recv_msg(smi_info, msg);
2363 * dell_poweredge_bt_xaction_handler
2364 * @info - smi_info.device_id must be populated
2366 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2367 * not respond to a Get SDR command if the length of the data
2368 * requested is exactly 0x3A, which leads to command timeouts and no
2369 * data returned. This intercepts such commands, and causes userspace
2370 * callers to try again with a different-sized buffer, which succeeds.
2373 #define STORAGE_NETFN 0x0A
2374 #define STORAGE_CMD_GET_SDR 0x23
/* Transaction-start notifier: short-circuits the problematic request
 * with a synthesized error response instead of sending it. */
2375 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2376 unsigned long unused,
2379 struct smi_info *smi_info = in;
2380 unsigned char *data = smi_info->curr_msg->data;
2381 unsigned int size = smi_info->curr_msg->data_size;
2383 (data[0]>>2) == STORAGE_NETFN &&
2384 data[1] == STORAGE_CMD_GET_SDR &&
2386 return_hosed_msg_badsize(smi_info);
2392 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2393 .notifier_call = dell_poweredge_bt_xaction_handler,
2397 * setup_dell_poweredge_bt_xaction_handler
2398 * @info - smi_info.device_id must be filled in already
2400 * Fills in smi_info.device_id.start_transaction_pre_hook
2401 * when we know what function to use there.
/* Registers the notifier only for Dell BMCs on a BT interface. */
2404 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2406 struct ipmi_device_id *id = &smi_info->device_id;
2407 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2408 smi_info->si_type == SI_BT)
2409 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2413 * setup_oem_data_handler
2414 * @info - smi_info.device_id must be filled in already
2416 * Fills in smi_info.device_id.oem_data_available_handler
2417 * when we know what function to use there.
2420 static void setup_oem_data_handler(struct smi_info *smi_info)
2422 setup_dell_poweredge_oem_data_handler(smi_info);
/* Install any quirk handlers that hook transaction start. */
2425 static void setup_xaction_handlers(struct smi_info *smi_info)
2427 setup_dell_poweredge_bt_xaction_handler(smi_info);
/* Stop the polling kthread and kill the SI timer; safe to call even
 * if the interface never got far enough to start them. */
2430 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2432 if (smi_info->intf) {
2433 /* The timer and thread are only running if the
2434 interface has been started up and registered. */
2435 if (smi_info->thread != NULL)
2436 kthread_stop(smi_info->thread);
2437 del_timer_sync(&smi_info->si_timer);
/* Legacy "well-known" port addresses tried when nothing else found a
 * BMC (terminated by a zero port). */
2441 static __devinitdata struct ipmi_default_vals
2447 { .type = SI_KCS, .port = 0xca2 },
2448 { .type = SI_SMIC, .port = 0xca9 },
2449 { .type = SI_BT, .port = 0xe4 },
/* Last-resort probe: walk ipmi_defaults[] and stop at the first port
 * where a state machine successfully initializes. */
2453 static __devinit void default_find_bmc(void)
2455 struct smi_info *info;
2458 for (i = 0; ; i++) {
2459 if (!ipmi_defaults[i].port)
2462 info = kzalloc(sizeof(*info), GFP_KERNEL);
2466 info->addr_source = NULL;
2468 info->si_type = ipmi_defaults[i].type;
2469 info->io_setup = port_setup;
2470 info->io.addr_data = ipmi_defaults[i].port;
2471 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2473 info->io.addr = NULL;
2474 info->io.regspacing = DEFAULT_REGSPACING;
2475 info->io.regsize = DEFAULT_REGSPACING;
2476 info->io.regshift = 0;
2478 if (try_smi_init(info) == 0) {
2480 printk(KERN_INFO "ipmi_si: Found default %s state"
2481 " machine at %s address 0x%lx\n",
2482 si_to_str[info->si_type],
2483 addr_space_to_str[info->io.addr_type],
2484 info->io.addr_data);
/* Duplicate check: false if an interface with the same address space
 * and address is already on smi_infos.  Caller holds smi_infos_lock. */
2490 static int is_new_interface(struct smi_info *info)
2494 list_for_each_entry(e, &smi_infos, link) {
2495 if (e->io.addr_type != info->io.addr_type)
2497 if (e->io.addr_data == info->io.addr_data)
/* Bring up one interface end-to-end: dedupe against smi_infos, pick
 * the state-machine handlers, set up I/O and locks, probe for a BMC
 * via Get Device ID, claim IRQs, register with the IPMI core and
 * create the /proc entries.  On any failure the labelled error paths
 * at the bottom unwind everything acquired so far. */
2504 static int try_smi_init(struct smi_info *new_smi)
2508 if (new_smi->addr_source) {
2509 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2510 " machine at %s address 0x%lx, slave address 0x%x,"
2512 new_smi->addr_source,
2513 si_to_str[new_smi->si_type],
2514 addr_space_to_str[new_smi->io.addr_type],
2515 new_smi->io.addr_data,
2516 new_smi->slave_addr, new_smi->irq);
2519 mutex_lock(&smi_infos_lock);
2520 if (!is_new_interface(new_smi)) {
2521 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2526 /* So we know not to free it unless we have allocated one. */
2527 new_smi->intf = NULL;
2528 new_smi->si_sm = NULL;
2529 new_smi->handlers = NULL;
2531 switch (new_smi->si_type) {
2533 new_smi->handlers = &kcs_smi_handlers;
2537 new_smi->handlers = &smic_smi_handlers;
2541 new_smi->handlers = &bt_smi_handlers;
2545 /* No support for anything else yet. */
2550 /* Allocate the state machine's data and initialize it. */
2551 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2552 if (!new_smi->si_sm) {
2553 printk(" Could not allocate state machine memory\n");
2557 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2560 /* Now that we know the I/O size, we can set up the I/O. */
2561 rv = new_smi->io_setup(new_smi);
2563 printk(" Could not set up I/O space\n");
2567 spin_lock_init(&(new_smi->si_lock));
2568 spin_lock_init(&(new_smi->msg_lock));
2569 spin_lock_init(&(new_smi->count_lock));
2571 /* Do low-level detection first. */
2572 if (new_smi->handlers->detect(new_smi->si_sm)) {
2573 if (new_smi->addr_source)
2574 printk(KERN_INFO "ipmi_si: Interface detection"
2580 /* Attempt a get device id command. If it fails, we probably
2581 don't have a BMC here. */
2582 rv = try_get_dev_id(new_smi);
2584 if (new_smi->addr_source)
2585 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2586 " at this location\n");
2590 setup_oem_data_handler(new_smi);
2591 setup_xaction_handlers(new_smi);
2593 /* Try to claim any interrupts. */
2594 if (new_smi->irq_setup)
2595 new_smi->irq_setup(new_smi);
/* Initialize run-time state before the machine is started. */
2597 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2598 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2599 new_smi->curr_msg = NULL;
2600 atomic_set(&new_smi->req_events, 0);
2601 new_smi->run_to_completion = 0;
2603 new_smi->interrupt_disabled = 0;
2604 atomic_set(&new_smi->stop_operation, 0);
2605 new_smi->intf_num = smi_num;
2608 /* Start clearing the flags before we enable interrupts or the
2609 timer to avoid racing with the timer. */
2610 start_clear_flags(new_smi);
2611 /* IRQ is defined to be set when non-zero. */
2613 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2615 if (!new_smi->dev) {
2616 /* If we don't already have a device from something
2617 * else (like PCI), then register a new one. */
2618 new_smi->pdev = platform_device_alloc("ipmi_si",
2623 " Unable to allocate platform device\n");
2626 new_smi->dev = &new_smi->pdev->dev;
2627 new_smi->dev->driver = &ipmi_driver;
2629 rv = platform_device_add(new_smi->pdev);
2633 " Unable to register system interface device:"
2638 new_smi->dev_registered = 1;
2641 rv = ipmi_register_smi(&handlers,
2643 &new_smi->device_id,
2646 new_smi->slave_addr);
2649 "ipmi_si: Unable to register device: error %d\n",
2651 goto out_err_stop_timer;
2654 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2655 type_file_read_proc, NULL,
2656 new_smi, THIS_MODULE);
2659 "ipmi_si: Unable to create proc entry: %d\n",
2661 goto out_err_stop_timer;
2664 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2665 stat_file_read_proc, NULL,
2666 new_smi, THIS_MODULE);
2669 "ipmi_si: Unable to create proc entry: %d\n",
2671 goto out_err_stop_timer;
2674 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2675 param_read_proc, NULL,
2676 new_smi, THIS_MODULE);
2679 "ipmi_si: Unable to create proc entry: %d\n",
2681 goto out_err_stop_timer;
2684 list_add_tail(&new_smi->link, &smi_infos);
2686 mutex_unlock(&smi_infos_lock);
2688 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
/* Error unwinding: stop the timer/thread first, then unregister and
 * release everything in reverse order of acquisition. */
2693 atomic_inc(&new_smi->stop_operation);
2694 wait_for_timer_and_thread(new_smi);
2698 ipmi_unregister_smi(new_smi->intf);
2700 if (new_smi->irq_cleanup)
2701 new_smi->irq_cleanup(new_smi);
2703 /* Wait until we know that we are out of any interrupt
2704 handlers might have been running before we freed the
2706 synchronize_sched();
2708 if (new_smi->si_sm) {
2709 if (new_smi->handlers)
2710 new_smi->handlers->cleanup(new_smi->si_sm);
2711 kfree(new_smi->si_sm);
2713 if (new_smi->addr_source_cleanup)
2714 new_smi->addr_source_cleanup(new_smi);
2715 if (new_smi->io_cleanup)
2716 new_smi->io_cleanup(new_smi);
2718 if (new_smi->dev_registered)
2719 platform_device_unregister(new_smi->pdev);
2723 mutex_unlock(&smi_infos_lock);
/* Module init: register the driver core object, run every discovery
 * method (hardcoded params, ACPI, DMI, PCI), optionally try the
 * default legacy ports, and bail out entirely if nothing was found
 * and unload_when_empty is set. */
2728 static __devinit int init_ipmi_si(void)
2738 /* Register the device drivers. */
2739 rv = driver_register(&ipmi_driver);
2742 "init_ipmi_si: Unable to register driver: %d\n",
2748 /* Parse out the si_type string into its components. */
2751 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2753 str = strchr(str, ',');
2763 printk(KERN_INFO "IPMI System Interface driver.\n");
2765 hardcode_find_bmc();
2777 pci_module_init(&ipmi_pci_driver);
2780 if (si_trydefaults) {
2781 mutex_lock(&smi_infos_lock);
2782 if (list_empty(&smi_infos)) {
2783 /* No BMC was found, try defaults. */
2784 mutex_unlock(&smi_infos_lock);
2787 mutex_unlock(&smi_infos_lock);
2791 mutex_lock(&smi_infos_lock);
2792 if (unload_when_empty && list_empty(&smi_infos)) {
2793 mutex_unlock(&smi_infos_lock);
2795 pci_unregister_driver(&ipmi_pci_driver);
2797 driver_unregister(&ipmi_driver);
2798 printk("ipmi_si: Unable to find any System Interface(s)\n");
2801 mutex_unlock(&smi_infos_lock);
2805 module_init(init_ipmi_si);
/* Tear down a single interface: unlink it, quiesce interrupts/timers,
 * drain any in-flight transaction, then unregister and free all of
 * its resources.  Caller holds smi_infos_lock. */
2807 static void cleanup_one_si(struct smi_info *to_clean)
2810 unsigned long flags;
2815 list_del(&to_clean->link);
2817 /* Tell the timer and interrupt handlers that we are shutting
2819 spin_lock_irqsave(&(to_clean->si_lock), flags);
2820 spin_lock(&(to_clean->msg_lock));
2822 atomic_inc(&to_clean->stop_operation);
2824 if (to_clean->irq_cleanup)
2825 to_clean->irq_cleanup(to_clean);
2827 spin_unlock(&(to_clean->msg_lock));
2828 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
2830 /* Wait until we know that we are out of any interrupt
2831 handlers might have been running before we freed the
2833 synchronize_sched();
2835 wait_for_timer_and_thread(to_clean);
2837 /* Interrupts and timeouts are stopped, now make sure the
2838 interface is in a clean state. */
2839 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2841 schedule_timeout_uninterruptible(1);
2844 rv = ipmi_unregister_smi(to_clean->intf);
2847 "ipmi_si: Unable to unregister device: errno=%d\n",
2851 to_clean->handlers->cleanup(to_clean->si_sm);
2853 kfree(to_clean->si_sm);
2855 if (to_clean->addr_source_cleanup)
2856 to_clean->addr_source_cleanup(to_clean);
2857 if (to_clean->io_cleanup)
2858 to_clean->io_cleanup(to_clean);
2860 if (to_clean->dev_registered)
2861 platform_device_unregister(to_clean->pdev);
/* Module exit: unregister the PCI driver, tear down every remaining
 * interface under smi_infos_lock, and drop the driver core object. */
2866 static __exit void cleanup_ipmi_si(void)
2868 struct smi_info *e, *tmp_e;
2874 pci_unregister_driver(&ipmi_pci_driver);
2877 mutex_lock(&smi_infos_lock);
2878 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2880 mutex_unlock(&smi_infos_lock);
2882 driver_unregister(&ipmi_driver);
2884 module_exit(cleanup_ipmi_si);
2886 MODULE_LICENSE("GPL");
2887 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2888 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");