4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
20 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * You should have received a copy of the GNU General Public License along
32 * with this program; if not, write to the Free Software Foundation, Inc.,
33 * 675 Mass Ave, Cambridge, MA 02139, USA.
37 * This file holds the "policy" for the interface to the SMI state
38 * machine. It does the configuration, handles timers and interrupts,
39 * and drives the real SMI state machine.
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
73 #define PFX "ipmi_si: "
75 /* Measure times between events in the driver. */
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC 10000
80 #define SI_USEC_PER_JIFFY (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
85 /* Bit for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR 0x01
87 #define IPMI_BMC_EVT_MSG_INTR 0x02
88 #define IPMI_BMC_EVT_MSG_BUFF 0x04
89 #define IPMI_BMC_SYS_LOG 0x08
96 SI_CLEARING_FLAGS_THEN_SET_IRQ,
98 SI_ENABLE_INTERRUPTS1,
99 SI_ENABLE_INTERRUPTS2,
100 SI_DISABLE_INTERRUPTS1,
101 SI_DISABLE_INTERRUPTS2
102 /* FIXME - add watchdog stuff. */
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG 2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
111 SI_KCS, SI_SMIC, SI_BT
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
115 #define DEVICE_NAME "ipmi_si"
117 static struct device_driver ipmi_driver =
120 .bus = &platform_bus_type
125 * Indexes into stats[] in smi_info below.
128 #define SI_STAT_short_timeouts 0
129 #define SI_STAT_long_timeouts 1
130 #define SI_STAT_timeout_restarts 2
131 #define SI_STAT_idles 3
132 #define SI_STAT_interrupts 4
133 #define SI_STAT_attentions 5
134 #define SI_STAT_flag_fetches 6
135 #define SI_STAT_hosed_count 7
136 #define SI_STAT_complete_transactions 8
137 #define SI_STAT_events 9
138 #define SI_STAT_watchdog_pretimeouts 10
139 #define SI_STAT_incoming_messages 11
141 /* If you add a stat, you must update this value. */
142 #define SI_NUM_STATS 12
148 struct si_sm_data *si_sm;
149 struct si_sm_handlers *handlers;
150 enum si_type si_type;
153 struct list_head xmit_msgs;
154 struct list_head hp_xmit_msgs;
155 struct ipmi_smi_msg *curr_msg;
156 enum si_intf_state si_state;
158 /* Used to handle the various types of I/O that can occur with
161 int (*io_setup)(struct smi_info *info);
162 void (*io_cleanup)(struct smi_info *info);
163 int (*irq_setup)(struct smi_info *info);
164 void (*irq_cleanup)(struct smi_info *info);
165 unsigned int io_size;
166 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
167 void (*addr_source_cleanup)(struct smi_info *info);
168 void *addr_source_data;
170 /* Per-OEM handler, called from handle_flags().
171 Returns 1 when handle_flags() needs to be re-run
172 or 0 indicating it set si_state itself.
174 int (*oem_data_avail_handler)(struct smi_info *smi_info);
176 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
177 is set to hold the flags until we are done handling everything
179 #define RECEIVE_MSG_AVAIL 0x01
180 #define EVENT_MSG_BUFFER_FULL 0x02
181 #define WDT_PRE_TIMEOUT_INT 0x08
182 #define OEM0_DATA_AVAIL 0x20
183 #define OEM1_DATA_AVAIL 0x40
184 #define OEM2_DATA_AVAIL 0x80
185 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
188 unsigned char msg_flags;
190 /* If set to true, this will request events the next time the
191 state machine is idle. */
194 /* If true, run the state machine to completion on every send
195 call. Generally used after a panic to make sure stuff goes
197 int run_to_completion;
199 /* The I/O port of an SI interface. */
202 /* The space between start addresses of the two ports. For
203 instance, if the first port is 0xca2 and the spacing is 4, then
204 the second port is 0xca6. */
205 unsigned int spacing;
207 /* zero if no irq; */
210 /* The timer for this si. */
211 struct timer_list si_timer;
213 /* The time (in jiffies) the last timeout occurred at. */
214 unsigned long last_timeout_jiffies;
216 /* Used to gracefully stop the timer without race conditions. */
217 atomic_t stop_operation;
219 /* The driver will disable interrupts when it gets into a
220 situation where it cannot handle messages due to lack of
221 memory. Once that situation clears up, it will re-enable
223 int interrupt_disabled;
225 /* From the get device id response... */
226 struct ipmi_device_id device_id;
228 /* Driver model stuff. */
230 struct platform_device *pdev;
232 /* True if we allocated the device, false if it came from
233 * someplace else (like PCI). */
236 /* Slave address, could be reported from DMI. */
237 unsigned char slave_addr;
239 /* Counters and things for the proc filesystem. */
240 atomic_t stats[SI_NUM_STATS];
242 struct task_struct *thread;
244 struct list_head link;
247 #define smi_inc_stat(smi, stat) \
248 atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
249 #define smi_get_stat(smi, stat) \
250 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
252 #define SI_MAX_PARMS 4
254 static int force_kipmid[SI_MAX_PARMS];
255 static int num_force_kipmid;
257 static int unload_when_empty = 1;
259 static int try_smi_init(struct smi_info *smi);
260 static void cleanup_one_si(struct smi_info *to_clean);
262 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
/* Register a notifier that is called (via xaction_notifier_list) just
   before each transaction is started; a notifier returning a
   NOTIFY_STOP_MASK result aborts the start (see start_next_msg()). */
263 static int register_xaction_notifier(struct notifier_block * nb)
265 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
/* Hand a completed message up to the IPMI message handler.  Caller must
   hold si_lock; it is dropped around the upcall because
   ipmi_smi_msg_received() may re-enter this driver (e.g. sender()). */
268 static void deliver_recv_msg(struct smi_info *smi_info,
269 struct ipmi_smi_msg *msg)
271 /* Deliver the message to the upper layer with the lock released. */
273 spin_unlock(&(smi_info->si_lock));
274 ipmi_smi_msg_received(smi_info->intf, msg);
275 spin_lock(&(smi_info->si_lock));
/* Fail the current message back to the upper layer with completion code
   cCode (clamped to IPMI_ERR_UNSPECIFIED if out of range).  The response
   echoes the request netfn (|4 turns a request netfn into the response
   netfn) and command.  Clears curr_msg before delivery. */
278 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
280 struct ipmi_smi_msg *msg = smi_info->curr_msg;
282 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
283 cCode = IPMI_ERR_UNSPECIFIED;
284 /* else use it as is */
286 /* Make it a response */
287 msg->rsp[0] = msg->data[0] | 4;
288 msg->rsp[1] = msg->data[1];
292 smi_info->curr_msg = NULL;
293 deliver_recv_msg(smi_info, msg);
/* Dequeue the next message (high-priority queue first) and kick off its
   transaction on the state machine.  Returns SI_SM_CALL_WITHOUT_DELAY when
   a transaction was started (or a notifier aborted it); presumably
   SI_SM_IDLE when both queues are empty — the return path is elided in
   this listing, confirm against the full source. */
296 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
299 struct list_head *entry = NULL;
304 /* No need to save flags, we already have interrupts off and we
305 already hold the SMI lock. */
306 if (!smi_info->run_to_completion)
307 spin_lock(&(smi_info->msg_lock));
309 /* Pick the high priority queue first. */
310 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
311 entry = smi_info->hp_xmit_msgs.next;
312 } else if (!list_empty(&(smi_info->xmit_msgs))) {
313 entry = smi_info->xmit_msgs.next;
317 smi_info->curr_msg = NULL;
323 smi_info->curr_msg = list_entry(entry,
328 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
/* Give registered notifiers a chance to veto/observe the transaction. */
330 err = atomic_notifier_call_chain(&xaction_notifier_list,
332 if (err & NOTIFY_STOP_MASK) {
333 rv = SI_SM_CALL_WITHOUT_DELAY;
336 err = smi_info->handlers->start_transaction(
338 smi_info->curr_msg->data,
339 smi_info->curr_msg->data_size);
/* A non-zero start_transaction() result is an IPMI completion code;
   fail the message back to the caller immediately. */
341 return_hosed_msg(smi_info, err);
344 rv = SI_SM_CALL_WITHOUT_DELAY;
347 if (!smi_info->run_to_completion)
348 spin_unlock(&(smi_info->msg_lock));
/* Begin the two-step interrupt-enable sequence: first fetch the BMC
   global enables; SI_ENABLE_INTERRUPTS1 handling (in
   handle_transaction_done()) then sets the interrupt bits. */
353 static void start_enable_irq(struct smi_info *smi_info)
355 unsigned char msg[2];
357 /* If we are enabling interrupts, we have to tell the
   BMC to use them (read-modify-write of the global enables). */
359 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
360 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
362 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
363 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
/* Begin the two-step interrupt-disable sequence (mirror of
   start_enable_irq()): fetch global enables, then SI_DISABLE_INTERRUPTS1
   handling clears the interrupt bits. */
366 static void start_disable_irq(struct smi_info *smi_info)
368 unsigned char msg[2];
370 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
371 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
373 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
374 smi_info->si_state = SI_DISABLE_INTERRUPTS1;
/* Issue a Clear Message Flags command for the watchdog pre-timeout bit
   and move the state machine into SI_CLEARING_FLAGS. */
377 static void start_clear_flags(struct smi_info *smi_info)
379 unsigned char msg[3];
381 /* Make sure the watchdog pre-timeout flag is not set at startup. */
382 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
383 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
384 msg[2] = WDT_PRE_TIMEOUT_INT;
386 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
387 smi_info->si_state = SI_CLEARING_FLAGS;
390 /* When we have a situation where we run out of memory and cannot
391 allocate messages, we just leave them in the BMC and run the system
392 polled until we can allocate some memory. Once we have some
393 memory, we will re-enable the interrupt. */
394 static inline void disable_si_irq(struct smi_info *smi_info)
/* No-op when there is no IRQ or interrupts are already disabled. */
396 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
397 start_disable_irq(smi_info);
398 smi_info->interrupt_disabled = 1;
/* Re-enable the interface interrupt once message allocation succeeds
   again (inverse of disable_si_irq()). */
402 static inline void enable_si_irq(struct smi_info *smi_info)
404 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
405 start_enable_irq(smi_info);
406 smi_info->interrupt_disabled = 0;
/* Act on the message flags fetched from the BMC (smi_info->msg_flags),
   highest-priority condition first: watchdog pre-timeout, then pending
   received messages, then event-buffer-full, then OEM data.  Called with
   si_lock held; drops/retakes it around the watchdog upcall.  Per-OEM
   handler contract: returns nonzero when handle_flags() must be re-run,
   0 when it set si_state itself (see oem_data_avail_handler comment in
   struct smi_info). */
410 static void handle_flags(struct smi_info *smi_info)
413 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
414 /* Watchdog pre-timeout */
415 smi_inc_stat(smi_info, watchdog_pretimeouts);
417 start_clear_flags(smi_info);
418 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
419 spin_unlock(&(smi_info->si_lock));
420 ipmi_smi_watchdog_pretimeout(smi_info->intf);
421 spin_lock(&(smi_info->si_lock));
422 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
423 /* Messages available. */
424 smi_info->curr_msg = ipmi_alloc_smi_msg();
425 if (!smi_info->curr_msg) {
/* Out of memory: go polled and retry later (see disable_si_irq()). */
426 disable_si_irq(smi_info);
427 smi_info->si_state = SI_NORMAL;
430 enable_si_irq(smi_info);
432 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
433 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
434 smi_info->curr_msg->data_size = 2;
436 smi_info->handlers->start_transaction(
438 smi_info->curr_msg->data,
439 smi_info->curr_msg->data_size);
440 smi_info->si_state = SI_GETTING_MESSAGES;
441 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
442 /* Events available. */
443 smi_info->curr_msg = ipmi_alloc_smi_msg();
444 if (!smi_info->curr_msg) {
445 disable_si_irq(smi_info);
446 smi_info->si_state = SI_NORMAL;
449 enable_si_irq(smi_info);
451 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
452 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
453 smi_info->curr_msg->data_size = 2;
455 smi_info->handlers->start_transaction(
457 smi_info->curr_msg->data,
458 smi_info->curr_msg->data_size);
459 smi_info->si_state = SI_GETTING_EVENTS;
460 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
461 smi_info->oem_data_avail_handler) {
462 if (smi_info->oem_data_avail_handler(smi_info))
465 smi_info->si_state = SI_NORMAL;
/* Called (with si_lock held) when the low-level state machine reports a
   transaction complete.  Dispatches on si_state: collects the response,
   delivers it or drives the next step of multi-step sequences
   (flag fetch -> handle_flags, clear-flags, get-events/get-messages,
   and the two-phase enable/disable-interrupts read-modify-write of the
   BMC global enables). */
469 static void handle_transaction_done(struct smi_info *smi_info)
471 struct ipmi_smi_msg *msg;
476 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
478 switch (smi_info->si_state) {
480 if (!smi_info->curr_msg)
483 smi_info->curr_msg->rsp_size
484 = smi_info->handlers->get_result(
486 smi_info->curr_msg->rsp,
487 IPMI_MAX_MSG_LENGTH);
489 /* Do this here because deliver_recv_msg() releases the
490 lock, and a new message can be put in during the
491 time the lock is released. */
492 msg = smi_info->curr_msg;
493 smi_info->curr_msg = NULL;
494 deliver_recv_msg(smi_info, msg);
497 case SI_GETTING_FLAGS:
499 unsigned char msg[4];
502 /* We got the flags from the SMI, now handle them. */
503 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
505 /* Error fetching flags, just give up for
   now and return to normal state. */
507 smi_info->si_state = SI_NORMAL;
508 } else if (len < 4) {
509 /* Hmm, no flags. That's technically illegal, but
510 don't use uninitialized data. */
511 smi_info->si_state = SI_NORMAL;
/* msg[3] is the flags byte of the Get Message Flags response. */
513 smi_info->msg_flags = msg[3];
514 handle_flags(smi_info);
519 case SI_CLEARING_FLAGS:
520 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
522 unsigned char msg[3];
524 /* We cleared the flags. */
525 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
527 /* Error clearing flags */
529 "ipmi_si: Error clearing flags: %2.2x\n",
532 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
533 start_enable_irq(smi_info);
535 smi_info->si_state = SI_NORMAL;
539 case SI_GETTING_EVENTS:
541 smi_info->curr_msg->rsp_size
542 = smi_info->handlers->get_result(
544 smi_info->curr_msg->rsp,
545 IPMI_MAX_MSG_LENGTH);
547 /* Do this here because deliver_recv_msg() releases the
548 lock, and a new message can be put in during the
549 time the lock is released. */
550 msg = smi_info->curr_msg;
551 smi_info->curr_msg = NULL;
/* rsp[2] is the IPMI completion code; nonzero means no event. */
552 if (msg->rsp[2] != 0) {
553 /* Error getting event, probably done. */
556 /* Take off the event flag. */
557 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
558 handle_flags(smi_info);
560 smi_inc_stat(smi_info, events);
562 /* Do this before we deliver the message
563 because delivering the message releases the
564 lock and something else can mess with the
   flags. */
566 handle_flags(smi_info);
568 deliver_recv_msg(smi_info, msg);
573 case SI_GETTING_MESSAGES:
575 smi_info->curr_msg->rsp_size
576 = smi_info->handlers->get_result(
578 smi_info->curr_msg->rsp,
579 IPMI_MAX_MSG_LENGTH);
581 /* Do this here because deliver_recv_msg() releases the
582 lock, and a new message can be put in during the
583 time the lock is released. */
584 msg = smi_info->curr_msg;
585 smi_info->curr_msg = NULL;
586 if (msg->rsp[2] != 0) {
587 /* Error getting message, probably done. */
590 /* Take off the msg flag. */
591 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
592 handle_flags(smi_info);
594 smi_inc_stat(smi_info, incoming_messages);
596 /* Do this before we deliver the message
597 because delivering the message releases the
598 lock and something else can mess with the
   flags. */
600 handle_flags(smi_info);
602 deliver_recv_msg(smi_info, msg);
/* Step 1 of enabling interrupts: got the Get Global Enables response;
   now write it back with the interrupt bits set. */
607 case SI_ENABLE_INTERRUPTS1:
609 unsigned char msg[4];
611 /* We got the flags from the SMI, now handle them. */
612 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
615 "ipmi_si: Could not enable interrupts"
616 ", failed get, using polled mode.\n");
617 smi_info->si_state = SI_NORMAL;
619 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
620 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
622 IPMI_BMC_RCV_MSG_INTR |
623 IPMI_BMC_EVT_MSG_INTR);
624 smi_info->handlers->start_transaction(
625 smi_info->si_sm, msg, 3);
626 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
/* Step 2: check the Set Global Enables response. */
631 case SI_ENABLE_INTERRUPTS2:
633 unsigned char msg[4];
635 /* We got the flags from the SMI, now handle them. */
636 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
639 "ipmi_si: Could not enable interrupts"
640 ", failed set, using polled mode.\n");
642 smi_info->si_state = SI_NORMAL;
/* Step 1 of disabling interrupts: read-modify-write the global
   enables with the interrupt bits masked off. */
646 case SI_DISABLE_INTERRUPTS1:
648 unsigned char msg[4];
650 /* We got the flags from the SMI, now handle them. */
651 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
654 "ipmi_si: Could not disable interrupts"
656 smi_info->si_state = SI_NORMAL;
658 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
659 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
661 ~(IPMI_BMC_RCV_MSG_INTR |
662 IPMI_BMC_EVT_MSG_INTR));
663 smi_info->handlers->start_transaction(
664 smi_info->si_sm, msg, 3);
665 smi_info->si_state = SI_DISABLE_INTERRUPTS2;
/* Step 2: check the Set Global Enables response. */
670 case SI_DISABLE_INTERRUPTS2:
672 unsigned char msg[4];
674 /* We got the flags from the SMI, now handle them. */
675 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
678 "ipmi_si: Could not disable interrupts"
681 smi_info->si_state = SI_NORMAL;
687 /* Called on timeouts and events. Timeouts should pass the elapsed
688 time, interrupts should pass in zero. Must be called with
689 si_lock held and interrupts disabled.
   Returns the final state-machine result so callers (timer, IRQ,
   kthread, sender) can decide how soon to poll again. */
690 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
693 enum si_sm_result si_sm_result;
696 /* There used to be a loop here that waited a little while
697 (around 25us) before giving up. That turned out to be
698 pointless, the minimum delays I was seeing were in the 300us
699 range, which is far too long to wait in an interrupt. So
700 we just run until the state machine tells us something
701 happened or it needs a delay. */
702 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
704 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
706 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
709 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
711 smi_inc_stat(smi_info, complete_transactions);
713 handle_transaction_done(smi_info);
714 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
716 else if (si_sm_result == SI_SM_HOSED)
718 smi_inc_stat(smi_info, hosed_count);
720 /* Do this before return_hosed_msg, because that
721 releases the lock. */
722 smi_info->si_state = SI_NORMAL;
723 if (smi_info->curr_msg != NULL) {
724 /* If we were handling a user message, format
725 a response to send to the upper layer to
726 tell it about the error. */
727 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
729 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
733 * We prefer handling attn over new messages. But don't do
734 * this if there is not yet an upper layer to handle anything.
736 if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN)
738 unsigned char msg[2];
740 smi_inc_stat(smi_info, attentions);
742 /* Got an attn, send down a get message flags to see
743 what's causing it. It would be better to handle
744 this in the upper layer, but due to the way
745 interrupts work with the SMI, that's not really
   possible. */
747 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
748 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
750 smi_info->handlers->start_transaction(
751 smi_info->si_sm, msg, 2);
752 smi_info->si_state = SI_GETTING_FLAGS;
756 /* If we are currently idle, try to start the next message. */
757 if (si_sm_result == SI_SM_IDLE) {
758 smi_inc_stat(smi_info, idles);
760 si_sm_result = start_next_msg(smi_info);
761 if (si_sm_result != SI_SM_IDLE)
765 if ((si_sm_result == SI_SM_IDLE)
766 && (atomic_read(&smi_info->req_events)))
768 /* We are idle and the upper layer requested that I fetch
   events, so do so. */
770 atomic_set(&smi_info->req_events, 0);
772 smi_info->curr_msg = ipmi_alloc_smi_msg();
773 if (!smi_info->curr_msg)
776 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
777 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
778 smi_info->curr_msg->data_size = 2;
780 smi_info->handlers->start_transaction(
782 smi_info->curr_msg->data,
783 smi_info->curr_msg->data_size);
784 smi_info->si_state = SI_GETTING_EVENTS;
/* ipmi_smi_handlers.sender: queue a message for transmission.  If the
   interface is shutting down (stop_operation), fail it back immediately.
   In run_to_completion mode (e.g. panic path) the transaction is driven
   synchronously; otherwise the message is queued and a transaction is
   started only if the state machine is idle. */
791 static void sender(void *send_info,
792 struct ipmi_smi_msg *msg,
795 struct smi_info *smi_info = send_info;
796 enum si_sm_result result;
802 if (atomic_read(&smi_info->stop_operation)) {
/* Synthesize an error response: |4 converts request netfn to
   response netfn; rsp[2] is the completion code. */
803 msg->rsp[0] = msg->data[0] | 4;
804 msg->rsp[1] = msg->data[1];
805 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
807 deliver_recv_msg(smi_info, msg);
813 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
816 if (smi_info->run_to_completion) {
818 * If we are running to completion, then throw it in
819 * the list and run transactions until everything is
820 * clear. Priority doesn't matter here.
824 * Run to completion means we are single-threaded, no
   * need for locks.
827 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
829 result = smi_event_handler(smi_info, 0);
830 while (result != SI_SM_IDLE) {
831 udelay(SI_SHORT_TIMEOUT_USEC);
832 result = smi_event_handler(smi_info,
833 SI_SHORT_TIMEOUT_USEC);
838 spin_lock_irqsave(&smi_info->msg_lock, flags);
840 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
842 list_add_tail(&msg->link, &smi_info->xmit_msgs);
843 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
845 spin_lock_irqsave(&smi_info->si_lock, flags);
846 if ((smi_info->si_state == SI_NORMAL)
847 && (smi_info->curr_msg == NULL))
849 start_next_msg(smi_info);
851 spin_unlock_irqrestore(&smi_info->si_lock, flags);
/* ipmi_smi_handlers.set_run_to_completion: switch the interface into
   (or out of) synchronous polled mode; when enabling, drain the state
   machine until idle by busy-polling with short delays. */
854 static void set_run_to_completion(void *send_info, int i_run_to_completion)
856 struct smi_info *smi_info = send_info;
857 enum si_sm_result result;
859 smi_info->run_to_completion = i_run_to_completion;
860 if (i_run_to_completion) {
861 result = smi_event_handler(smi_info, 0);
862 while (result != SI_SM_IDLE) {
863 udelay(SI_SHORT_TIMEOUT_USEC);
864 result = smi_event_handler(smi_info,
865 SI_SHORT_TIMEOUT_USEC);
/* Kernel thread (kipmi%d) that polls the state machine for interfaces
   without interrupts (see smi_start_processing()).  Runs at lowest
   priority; polls hard while the SM wants immediate attention, otherwise
   yields or sleeps a jiffy. */
870 static int ipmi_thread(void *data)
872 struct smi_info *smi_info = data;
874 enum si_sm_result smi_result;
876 set_user_nice(current, 19);
877 while (!kthread_should_stop()) {
878 spin_lock_irqsave(&(smi_info->si_lock), flags);
879 smi_result = smi_event_handler(smi_info, 0);
880 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
881 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
884 else if (smi_result == SI_SM_CALL_WITH_DELAY)
887 schedule_timeout_interruptible(1);
/* ipmi_smi_handlers.poll: run the state machine once, crediting 10us of
   elapsed time so timeouts still make progress when only polled. */
893 static void poll(void *send_info)
895 struct smi_info *smi_info = send_info;
899 * Make sure there is some delay in the poll loop so we can
900 * drive time forward and timeout things.
903 spin_lock_irqsave(&smi_info->si_lock, flags);
904 smi_event_handler(smi_info, 10);
905 spin_unlock_irqrestore(&smi_info->si_lock, flags);
/* ipmi_smi_handlers.request_events: ask the driver to fetch events the
   next time the state machine is idle (consumed in smi_event_handler()).
   Ignored while the interface is shutting down. */
908 static void request_events(void *send_info)
910 struct smi_info *smi_info = send_info;
912 if (atomic_read(&smi_info->stop_operation))
915 atomic_set(&smi_info->req_events, 1);
918 static int initialized;
/* Timer callback that drives the interface when not (or not only)
   interrupt-driven.  Feeds real elapsed microseconds into the state
   machine, then re-arms itself: a long (SI_TIMEOUT_JIFFIES) period when
   running on interrupts or the SM is idle, a 1-jiffy period when the SM
   asked for a short delay. */
920 static void smi_timeout(unsigned long data)
922 struct smi_info *smi_info = (struct smi_info *) data;
923 enum si_sm_result smi_result;
925 unsigned long jiffies_now;
931 spin_lock_irqsave(&(smi_info->si_lock), flags);
934 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
936 jiffies_now = jiffies;
/* Convert jiffies since the last run into microseconds for the SM. */
937 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
938 * SI_USEC_PER_JIFFY);
939 smi_result = smi_event_handler(smi_info, time_diff);
941 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
943 smi_info->last_timeout_jiffies = jiffies_now;
945 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
946 /* Running with interrupts, only do long timeouts. */
947 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
948 smi_inc_stat(smi_info, long_timeouts);
952 /* If the state machine asks for a short delay, then shorten
953 the timer timeout. */
954 if (smi_result == SI_SM_CALL_WITH_DELAY) {
955 smi_inc_stat(smi_info, short_timeouts);
956 smi_info->si_timer.expires = jiffies + 1;
958 smi_inc_stat(smi_info, long_timeouts);
959 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
963 add_timer(&(smi_info->si_timer));
/* Hard-IRQ handler: count the interrupt and run the state machine with
   zero elapsed time (per the smi_event_handler() contract for
   interrupt callers). */
966 static irqreturn_t si_irq_handler(int irq, void *data)
968 struct smi_info *smi_info = data;
974 spin_lock_irqsave(&(smi_info->si_lock), flags);
976 smi_inc_stat(smi_info, interrupts);
980 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
982 smi_event_handler(smi_info, 0);
983 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
/* BT-interface IRQ wrapper: acknowledge the interrupt in the BT
   interrupt-mask register (keeping IRQs enabled) before running the
   common handler. */
987 static irqreturn_t si_bt_irq_handler(int irq, void *data)
989 struct smi_info *smi_info = data;
990 /* We need to clear the IRQ flag for the BT interface. */
991 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
992 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
993 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
994 return si_irq_handler(irq, data);
/* ipmi_smi_handlers.start_processing: called once the upper layer is
   ready.  Records the intf, claims the IRQ, starts the driving timer,
   and starts the kipmi%d polling thread when needed (no IRQ and not BT,
   unless overridden by the force_kipmid module parameter).  Thread
   startup failure is non-fatal: the timer alone drives the interface. */
997 static int smi_start_processing(void *send_info,
1000 struct smi_info *new_smi = send_info;
1003 new_smi->intf = intf;
1005 /* Try to claim any interrupts. */
1006 if (new_smi->irq_setup)
1007 new_smi->irq_setup(new_smi);
1009 /* Set up the timer that drives the interface. */
1010 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1011 new_smi->last_timeout_jiffies = jiffies;
1012 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1015 * Check if the user forcefully enabled the daemon.
1017 if (new_smi->intf_num < num_force_kipmid)
1018 enable = force_kipmid[new_smi->intf_num];
1020 * The BT interface is efficient enough to not need a thread,
1021 * and there is no need for a thread if we have interrupts.
1023 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1027 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1028 "kipmi%d", new_smi->intf_num);
1029 if (IS_ERR(new_smi->thread)) {
1030 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1031 " kernel thread due to error %ld, only using"
1032 " timers to drive the interface\n",
1033 PTR_ERR(new_smi->thread));
1034 new_smi->thread = NULL;
/* ipmi_smi_handlers.set_maintenance_mode: on entry to maintenance mode,
   cancel any pending event-fetch request (req_events) — the elided lines
   presumably gate this on !enable; confirm against the full source. */
1041 static void set_maintenance_mode(void *send_info, int enable)
1043 struct smi_info *smi_info = send_info;
1046 atomic_set(&smi_info->req_events, 0);
1049 static struct ipmi_smi_handlers handlers =
1051 .owner = THIS_MODULE,
1052 .start_processing = smi_start_processing,
1054 .request_events = request_events,
1055 .set_maintenance_mode = set_maintenance_mode,
1056 .set_run_to_completion = set_run_to_completion,
1060 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1061 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
1063 static LIST_HEAD(smi_infos);
1064 static DEFINE_MUTEX(smi_infos_lock);
1065 static int smi_num; /* Used to sequence the SMIs */
1067 #define DEFAULT_REGSPACING 1
1068 #define DEFAULT_REGSIZE 1
1070 static int si_trydefaults = 1;
1071 static char *si_type[SI_MAX_PARMS];
1072 #define MAX_SI_TYPE_STR 30
1073 static char si_type_str[MAX_SI_TYPE_STR];
1074 static unsigned long addrs[SI_MAX_PARMS];
1075 static unsigned int num_addrs;
1076 static unsigned int ports[SI_MAX_PARMS];
1077 static unsigned int num_ports;
1078 static int irqs[SI_MAX_PARMS];
1079 static unsigned int num_irqs;
1080 static int regspacings[SI_MAX_PARMS];
1081 static unsigned int num_regspacings;
1082 static int regsizes[SI_MAX_PARMS];
1083 static unsigned int num_regsizes;
1084 static int regshifts[SI_MAX_PARMS];
1085 static unsigned int num_regshifts;
1086 static int slave_addrs[SI_MAX_PARMS];
1087 static unsigned int num_slave_addrs;
1089 #define IPMI_IO_ADDR_SPACE 0
1090 #define IPMI_MEM_ADDR_SPACE 1
1091 static char *addr_space_to_str[] = { "i/o", "mem" };
1093 static int hotmod_handler(const char *val, struct kernel_param *kp);
1095 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1096 MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1097 " Documentation/IPMI.txt in the kernel sources for the"
1100 module_param_named(trydefaults, si_trydefaults, bool, 0);
1101 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1102 " default scan of the KCS and SMIC interface at the standard"
1104 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1105 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1106 " interface separated by commas. The types are 'kcs',"
1107 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1108 " the first interface to kcs and the second to bt");
1109 module_param_array(addrs, ulong, &num_addrs, 0);
1110 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1111 " addresses separated by commas. Only use if an interface"
1112 " is in memory. Otherwise, set it to zero or leave"
1114 module_param_array(ports, uint, &num_ports, 0);
1115 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1116 " addresses separated by commas. Only use if an interface"
1117 " is a port. Otherwise, set it to zero or leave"
1119 module_param_array(irqs, int, &num_irqs, 0);
1120 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1121 " addresses separated by commas. Only use if an interface"
1122 " has an interrupt. Otherwise, set it to zero or leave"
1124 module_param_array(regspacings, int, &num_regspacings, 0);
1125 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1126 " and each successive register used by the interface. For"
1127 " instance, if the start address is 0xca2 and the spacing"
1128 " is 2, then the second address is at 0xca4. Defaults"
1130 module_param_array(regsizes, int, &num_regsizes, 0);
1131 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1132 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1133 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1134 " the 8-bit IPMI register has to be read from a larger"
1136 module_param_array(regshifts, int, &num_regshifts, 0);
1137 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
1138 " IPMI register, in bits. For instance, if the data"
1139 " is read from a 32-bit word and the IPMI data is in"
1140 " bit 8-15, then the shift would be 8");
1141 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1142 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1143 " the controller. Normally this is 0x20, but can be"
1144 " overridden by this parm. This is an array indexed"
1145 " by interface number.");
1146 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1147 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1148 " disabled(0). Normally the IPMI driver auto-detects"
1149 " this, but the value may be overridden by this parm.");
1150 module_param(unload_when_empty, int, 0);
1151 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1152 " specified or found, default is 1. Setting to 0"
1153 " is useful for hot add of devices using hotmod.");
/* Release the IRQ claimed by std_irq_setup(); for BT interfaces, first
   mask the interrupt in the BT interrupt-mask register. */
1156 static void std_irq_cleanup(struct smi_info *info)
1158 if (info->si_type == SI_BT)
1159 /* Disable the interrupt in the BT interface. */
1160 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1161 free_irq(info->irq, info);
/* Claim the interface's IRQ: BT interfaces get si_bt_irq_handler (which
   acks the BT mask register) and have the IRQ enabled in the interface;
   others get si_irq_handler.  On failure the interface falls back to
   polled mode (warning printed, elided lines presumably clear
   info->irq); on success the matching cleanup hook is installed. */
1164 static int std_irq_setup(struct smi_info *info)
1171 if (info->si_type == SI_BT) {
1172 rv = request_irq(info->irq,
1174 IRQF_SHARED | IRQF_DISABLED,
1178 /* Enable the interrupt in the BT interface. */
1179 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1180 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1182 rv = request_irq(info->irq,
1184 IRQF_SHARED | IRQF_DISABLED,
1189 "ipmi_si: %s unable to claim interrupt %d,"
1190 " running polled\n",
1191 DEVICE_NAME, info->irq);
1194 info->irq_cleanup = std_irq_cleanup;
1195 printk(" Using irq %d\n", info->irq);
/* si_sm_io.inputb for port I/O with 1-byte registers: registers are
   regspacing bytes apart starting at addr_data. */
1201 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1203 unsigned int addr = io->addr_data;
1205 return inb(addr + (offset * io->regspacing));
/* si_sm_io.outputb counterpart of port_inb(): write byte b to the
   register at the given offset. */
1208 static void port_outb(struct si_sm_io *io, unsigned int offset,
1211 unsigned int addr = io->addr_data;
1213 outb(b, addr + (offset * io->regspacing));
/* Port input for 16-bit-wide registers: the IPMI byte is extracted from
   the word via regshift. */
1216 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1218 unsigned int addr = io->addr_data;
1220 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
/* 16-bit port-I/O write: position the data byte with regshift. */
1223 static void port_outw(struct si_sm_io *io, unsigned int offset,
1226 unsigned int addr = io->addr_data;
1228 outw(b << io->regshift, addr + (offset * io->regspacing));
/* 32-bit port-I/O read, data byte extracted at bit regshift. */
1231 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1233 unsigned int addr = io->addr_data;
1235 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
/* 32-bit port-I/O write, data byte positioned at bit regshift. */
1238 static void port_outl(struct si_sm_io *io, unsigned int offset,
1241 unsigned int addr = io->addr_data;
1243 outl(b << io->regshift, addr+(offset * io->regspacing));
/*
 * Release the per-register I/O port regions claimed by port_setup().
 * Each of the io_size registers was requested individually (see the
 * disjoint-ACPI-region note in port_setup()), so release them the
 * same way.
 */
1246 static void port_cleanup(struct smi_info *info)
1248 unsigned int addr = info->io.addr_data;
1252 for (idx = 0; idx < info->io_size; idx++) {
1253 release_region(addr + idx * info->io.regspacing,
/*
 * Set up port-I/O access for an interface: select the inb/inw/inl
 * accessor pair matching io.regsize, then claim each register's I/O
 * region individually.  Registers are requested one at a time because
 * some BIOSes reserve disjoint sub-ranges in ACPI, which makes a
 * single request_region() over the whole span fail.
 */
1259 static int port_setup(struct smi_info *info)
1261 unsigned int addr = info->io.addr_data;
1267 info->io_cleanup = port_cleanup;
1269 /* Figure out the actual inb/inw/inl/etc routine to use based
1270 upon the register size. */
1271 switch (info->io.regsize) {
1273 info->io.inputb = port_inb;
1274 info->io.outputb = port_outb;
1277 info->io.inputb = port_inw;
1278 info->io.outputb = port_outw;
1281 info->io.inputb = port_inl;
1282 info->io.outputb = port_outl;
1285 printk("ipmi_si: Invalid register size: %d\n",
1290 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1291 * tables. This causes problems when trying to register the
1292 * entire I/O region. Therefore we must register each I/O
1295 for (idx = 0; idx < info->io_size; idx++) {
1296 if (request_region(addr + idx * info->io.regspacing,
1297 info->io.regsize, DEVICE_NAME) == NULL) {
1298 /* Undo allocations */
1300 release_region(addr + idx * info->io.regspacing,
/* 8-bit memory-mapped register read. */
1309 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1311 return readb((io->addr)+(offset * io->regspacing));
/* 8-bit memory-mapped register write. */
1314 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1317 writeb(b, (io->addr)+(offset * io->regspacing));
/* 16-bit memory-mapped read; data byte extracted at bit regshift. */
1320 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1322 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1326 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1329 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* 32-bit memory-mapped read; data byte extracted at bit regshift. */
1332 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1334 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
/* 32-bit memory-mapped write; data byte positioned at bit regshift. */
1338 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1341 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
/* 64-bit memory-mapped read (readq is only available on 64-bit
 * platforms -- presumably compiled under an #ifdef; confirm). */
1345 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1347 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
/* 64-bit memory-mapped write, mirror of mem_inq(). */
1351 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1354 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
/*
 * Undo mem_setup(): unmap the ioremap()'ed window and release the
 * memory region.  The mapsize expression must match the one used in
 * mem_setup() exactly, otherwise the release would not cover the
 * claimed range.
 */
1358 static void mem_cleanup(struct smi_info *info)
1360 unsigned long addr = info->io.addr_data;
1363 if (info->io.addr) {
1364 iounmap(info->io.addr);
1366 mapsize = ((info->io_size * info->io.regspacing)
1367 - (info->io.regspacing - info->io.regsize));
1369 release_mem_region(addr, mapsize);
/*
 * Set up memory-mapped access for an interface: pick the
 * readb/readw/readl/readq accessor pair matching io.regsize, claim the
 * covering memory region, and ioremap() it.  The mapsize formula spans
 * from the first register to the end of the last full register only
 * (it deliberately excludes the unused tail padding of the final
 * regspacing stride).
 */
1373 static int mem_setup(struct smi_info *info)
1375 unsigned long addr = info->io.addr_data;
1381 info->io_cleanup = mem_cleanup;
1383 /* Figure out the actual readb/readw/readl/etc routine to use based
1384 upon the register size. */
1385 switch (info->io.regsize) {
1387 info->io.inputb = intf_mem_inb;
1388 info->io.outputb = intf_mem_outb;
1391 info->io.inputb = intf_mem_inw;
1392 info->io.outputb = intf_mem_outw;
1395 info->io.inputb = intf_mem_inl;
1396 info->io.outputb = intf_mem_outl;
1400 info->io.inputb = mem_inq;
1401 info->io.outputb = mem_outq;
1405 printk("ipmi_si: Invalid register size: %d\n",
1410 /* Calculate the total amount of memory to claim. This is an
1411 * unusual looking calculation, but it avoids claiming any
1412 * more memory than it has to. It will claim everything
1413 * between the first address to the end of the last full
1415 mapsize = ((info->io_size * info->io.regspacing)
1416 - (info->io.regspacing - info->io.regsize));
1418 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1421 info->io.addr = ioremap(addr, mapsize);
1422 if (info->io.addr == NULL) {
1423 release_mem_region(addr, mapsize);
1430 * Parms come in as <op1>[:op2[:op3...]]. ops are:
1431 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
/*
 * Keyword tables for the "hotmod" module parameter.  Each table is a
 * NULL-name-terminated list of string/value pairs; note the tables
 * have different lengths (ops: 2 entries, si: 3, as: 2), which matters
 * to parse_str()'s loop bound.
 */
1439 enum hotmod_op { HM_ADD, HM_REMOVE };
1440 struct hotmod_vals {
1444 static struct hotmod_vals hotmod_ops[] = {
1446 { "remove", HM_REMOVE },
1449 static struct hotmod_vals hotmod_si[] = {
1451 { "smic", SI_SMIC },
1455 static struct hotmod_vals hotmod_as[] = {
1456 { "mem", IPMI_MEM_ADDR_SPACE },
1457 { "i/o", IPMI_IO_ADDR_SPACE },
1461 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1466 s = strchr(*curr, ',');
1468 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1473 for (i = 0; hotmod_ops[i].name; i++) {
1474 if (strcmp(*curr, v[i].name) == 0) {
1481 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
/*
 * If @curr names the integer option @name, parse @option as an integer
 * into *val.  Judging by the callers in hotmod_handler(), the return
 * convention appears to be: negative on parse error, distinct values
 * for "matched" vs. "not this option" -- exact success values are not
 * visible here; confirm against the full source.
 */
1485 static int check_hotmod_int_op(const char *curr, const char *option,
1486 const char *name, int *val)
1490 if (strcmp(curr, name) == 0) {
1492 printk(KERN_WARNING PFX
1493 "No option given for '%s'\n",
1497 *val = simple_strtoul(option, &n, 0);
1498 if ((*n != '\0') || (*option == '\0')) {
1499 printk(KERN_WARNING PFX
1500 "Bad option given for '%s'\n",
/*
 * Module-parameter handler for "hotmod": hot add/remove of interfaces
 * at runtime.  Parses colon-separated operations of the form
 * add|remove,kcs|bt|smic,mem|i/o,<address>[,rsp=..][,rsi=..][,rsh=..]
 * [,irq=..][,ipmb=..], then either builds a new smi_info and registers
 * it (HM_ADD path) or scans the global smi_infos list for a matching
 * type/space/address and tears it down (HM_REMOVE path).
 */
1509 static int hotmod_handler(const char *val, struct kernel_param *kp)
1511 char *str = kstrdup(val, GFP_KERNEL);
1513 char *next, *curr, *s, *n, *o;
1515 enum si_type si_type;
1525 struct smi_info *info;
1530 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1533 while ((ival >= 0) && isspace(str[ival])) {
/* Each colon-separated element is one complete add/remove operation. */
1538 for (curr = str; curr; curr = next) {
1545 next = strchr(curr, ':');
1551 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1556 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1561 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1565 s = strchr(curr, ',');
1570 addr = simple_strtoul(curr, &n, 0);
1571 if ((*n != '\0') || (*curr == '\0')) {
1572 printk(KERN_WARNING PFX "Invalid hotmod address"
/* Remaining comma-separated name=value options. */
1579 s = strchr(curr, ',');
1584 o = strchr(curr, '=');
1589 rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1594 rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1599 rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1604 rv = check_hotmod_int_op(curr, o, "irq", &irq);
1609 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1616 printk(KERN_WARNING PFX
1617 "Invalid hotmod option '%s'\n",
/* HM_ADD: build and register a new interface description. */
1623 info = kzalloc(sizeof(*info), GFP_KERNEL);
1629 info->addr_source = "hotmod";
1630 info->si_type = si_type;
1631 info->io.addr_data = addr;
1632 info->io.addr_type = addr_space;
1633 if (addr_space == IPMI_MEM_ADDR_SPACE)
1634 info->io_setup = mem_setup;
1636 info->io_setup = port_setup;
1638 info->io.addr = NULL;
1639 info->io.regspacing = regspacing;
1640 if (!info->io.regspacing)
1641 info->io.regspacing = DEFAULT_REGSPACING;
1642 info->io.regsize = regsize;
1643 if (!info->io.regsize)
/* NOTE(review): regsize defaults to DEFAULT_REGSPACING here, not
 * DEFAULT_REGSIZE -- both are presumably 1, but verify. */
1644 info->io.regsize = DEFAULT_REGSPACING;
1645 info->io.regshift = regshift;
1648 info->irq_setup = std_irq_setup;
1649 info->slave_addr = ipmb;
/* HM_REMOVE: find the matching registered interface and remove it. */
1654 struct smi_info *e, *tmp_e;
1656 mutex_lock(&smi_infos_lock);
1657 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1658 if (e->io.addr_type != addr_space)
1660 if (e->si_type != si_type)
1662 if (e->io.addr_data == addr)
1665 mutex_unlock(&smi_infos_lock);
/*
 * Register interfaces described by the "ports"/"addrs" module
 * parameters: for each of the SI_MAX_PARMS slots with a non-zero port
 * or address, build an smi_info with type from si_type[] (default
 * "kcs") and the per-slot regspacing/regsize/regshift/irq overrides.
 */
1674 static __devinit void hardcode_find_bmc(void)
1677 struct smi_info *info;
1679 for (i = 0; i < SI_MAX_PARMS; i++) {
1680 if (!ports[i] && !addrs[i])
1683 info = kzalloc(sizeof(*info), GFP_KERNEL);
1687 info->addr_source = "hardcoded";
1689 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1690 info->si_type = SI_KCS;
1691 } else if (strcmp(si_type[i], "smic") == 0) {
1692 info->si_type = SI_SMIC;
1693 } else if (strcmp(si_type[i], "bt") == 0) {
1694 info->si_type = SI_BT;
1697 "ipmi_si: Interface type specified "
1698 "for interface %d, was invalid: %s\n",
/* A port takes precedence; otherwise a memory address; otherwise
 * the slot is rejected. */
1706 info->io_setup = port_setup;
1707 info->io.addr_data = ports[i];
1708 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1709 } else if (addrs[i]) {
1711 info->io_setup = mem_setup;
1712 info->io.addr_data = addrs[i];
1713 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1716 "ipmi_si: Interface type specified "
1717 "for interface %d, "
1718 "but port and address were not set or "
1719 "set to zero.\n", i);
1724 info->io.addr = NULL;
1725 info->io.regspacing = regspacings[i];
1726 if (!info->io.regspacing)
1727 info->io.regspacing = DEFAULT_REGSPACING;
1728 info->io.regsize = regsizes[i];
1729 if (!info->io.regsize)
/* NOTE(review): regsize default is DEFAULT_REGSPACING, matching the
 * same oddity in hotmod_handler(); verify both equal 1. */
1730 info->io.regsize = DEFAULT_REGSPACING;
1731 info->io.regshift = regshifts[i];
1732 info->irq = irqs[i];
1734 info->irq_setup = std_irq_setup;
1742 #include <linux/acpi.h>
/* Latch for ACPI probing: set once a lookup fails so later probes are
 * skipped (SPMI tables are read sequentially). */
1744 /* Once we get an ACPI failure, we don't try any more, because we go
1745 through the tables sequentially. Once we don't find a table, there
1747 static int acpi_failure;
1749 /* For GPE-type interrupts. */
1750 static u32 ipmi_acpi_gpe(void *context)
1752 struct smi_info *smi_info = context;
1753 unsigned long flags;
1758 spin_lock_irqsave(&(smi_info->si_lock), flags);
1760 smi_inc_stat(smi_info, interrupts);
1763 do_gettimeofday(&t);
1764 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1766 smi_event_handler(smi_info, 0);
1767 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1769 return ACPI_INTERRUPT_HANDLED;
/* Remove the GPE handler installed by acpi_gpe_irq_setup().  Here
 * info->irq holds the GPE number, not a Linux IRQ. */
1772 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1777 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
/*
 * Install ipmi_acpi_gpe() as a level-triggered handler for the GPE
 * number stored in info->irq.  On failure, log and fall back to
 * polling (mirrors std_irq_setup()'s behavior).
 */
1780 static int acpi_gpe_irq_setup(struct smi_info *info)
1787 /* FIXME - is level triggered right? */
1788 status = acpi_install_gpe_handler(NULL,
1790 ACPI_GPE_LEVEL_TRIGGERED,
1793 if (status != AE_OK) {
1795 "ipmi_si: %s unable to claim ACPI GPE %d,"
1796 " running polled\n",
1797 DEVICE_NAME, info->irq);
1801 info->irq_cleanup = acpi_gpe_irq_cleanup;
1802 printk(" Using ACPI GPE %d\n", info->irq);
/* In-memory layout of the ACPI SPMI (Service Processor Management
 * Interface) table, per the HP spec referenced below.  Field order
 * must match the on-disk table exactly. */
1809 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1820 s8 CreatorRevision[4];
1823 s16 SpecificationRevision;
1826 * Bit 0 - SCI interrupt supported
1827 * Bit 1 - I/O APIC/SAPIC
1831 /* If bit 0 of InterruptType is set, then this is the SCI
1832 interrupt in the GPEx_STS register. */
1837 /* If bit 1 of InterruptType is set, then this is the I/O
1838 APIC/SAPIC interrupt. */
1839 u32 GlobalSystemInterrupt;
1841 /* The actual register address. */
1842 struct acpi_generic_address addr;
1846 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
/*
 * Build an smi_info from one ACPI SPMI table: validate the legacy
 * field, map InterfaceType to SI_KCS/SI_SMIC/SI_BT, pick GPE vs.
 * APIC/SAPIC interrupt delivery from InterruptType, and derive
 * regspacing/regshift from the generic-address bit width/offset.
 */
1849 static __devinit int try_init_acpi(struct SPMITable *spmi)
1851 struct smi_info *info;
1854 if (spmi->IPMIlegacy != 1) {
1855 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1859 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1860 addr_space = IPMI_MEM_ADDR_SPACE;
1862 addr_space = IPMI_IO_ADDR_SPACE;
1864 info = kzalloc(sizeof(*info), GFP_KERNEL);
1866 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1870 info->addr_source = "ACPI";
1872 /* Figure out the interface type. */
1873 switch (spmi->InterfaceType)
1876 info->si_type = SI_KCS;
1879 info->si_type = SI_SMIC;
1882 info->si_type = SI_BT;
1885 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1886 spmi->InterfaceType);
/* Interrupt type: bit 0 = GPE, bit 1 = global system interrupt. */
1891 if (spmi->InterruptType & 1) {
1892 /* We've got a GPE interrupt. */
1893 info->irq = spmi->GPE;
1894 info->irq_setup = acpi_gpe_irq_setup;
1895 } else if (spmi->InterruptType & 2) {
1896 /* We've got an APIC/SAPIC interrupt. */
1897 info->irq = spmi->GlobalSystemInterrupt;
1898 info->irq_setup = std_irq_setup;
1900 /* Use the default interrupt setting. */
1902 info->irq_setup = NULL;
1905 if (spmi->addr.bit_width) {
1906 /* A (hopefully) properly formed register bit width. */
1907 info->io.regspacing = spmi->addr.bit_width / 8;
1909 info->io.regspacing = DEFAULT_REGSPACING;
1911 info->io.regsize = info->io.regspacing;
1912 info->io.regshift = spmi->addr.bit_offset;
1914 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1915 info->io_setup = mem_setup;
1916 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1917 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1918 info->io_setup = port_setup;
1919 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1922 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1925 info->io.addr_data = spmi->addr.address;
/*
 * Walk all SPMI tables (acpi_get_table() instances are 1-based) and
 * hand each to try_init_acpi() until a lookup fails.
 */
1932 static __devinit void acpi_find_bmc(void)
1935 struct SPMITable *spmi;
1944 for (i = 0; ; i++) {
1945 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1946 (struct acpi_table_header **)&spmi);
1947 if (status != AE_OK)
1950 try_init_acpi(spmi);
/* Decoded form of an SMBIOS Type 38 (IPMI Device Information) record,
 * filled in by decode_dmi() and consumed by try_init_dmi(). */
1956 struct dmi_ipmi_data
1960 unsigned long base_addr;
/*
 * Decode an SMBIOS Type 38 IPMI record into @dmi: interface type,
 * base address and I/O-vs-memory flag (low bit of the base address),
 * IRQ, register spacing (top bits of byte 0x10), and slave address.
 * Short records fall back to the legacy-layout path at the bottom.
 */
1966 static int __devinit decode_dmi(const struct dmi_header *dm,
1967 struct dmi_ipmi_data *dmi)
1969 const u8 *data = (const u8 *)dm;
1970 unsigned long base_addr;
1972 u8 len = dm->length;
1974 dmi->type = data[4];
/* NOTE(review): copying sizeof(unsigned long) bytes reads 8 bytes on
 * 64-bit kernels from an 8-byte table field -- assumed intentional for
 * the 64-bit SMBIOS address; verify record layout. */
1976 memcpy(&base_addr, data+8, sizeof(unsigned long));
1978 if (base_addr & 1) {
1980 base_addr &= 0xFFFE;
1981 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1985 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1987 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1989 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1991 dmi->irq = data[0x11];
1993 /* The top two bits of byte 0x10 hold the register spacing. */
1994 reg_spacing = (data[0x10] & 0xC0) >> 6;
1995 switch(reg_spacing){
1996 case 0x00: /* Byte boundaries */
1999 case 0x01: /* 32-bit boundaries */
2002 case 0x02: /* 16-byte boundaries */
2006 /* Some other interface, just ignore it. */
/* Legacy/short-record path: assume I/O space (see comment). */
2011 /* Note that technically, the lower bit of the base
2012 * address should be 1 if the address is I/O and 0 if
2013 * the address is in memory. So many systems get that
2014 * wrong (and all that I have seen are I/O) so we just
2015 * ignore that bit and assume I/O. Systems that use
2016 * memory should use the newer spec, anyway. */
2017 dmi->base_addr = base_addr & 0xfffe;
2018 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2022 dmi->slave_addr = data[6];
/*
 * Convert a decoded SMBIOS record into an smi_info and register it:
 * map the record's type code to SI_KCS/SI_SMIC/SI_BT, select port vs.
 * memory setup from addr_space, and carry over spacing, slave address,
 * and IRQ.
 */
2027 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2029 struct smi_info *info;
2031 info = kzalloc(sizeof(*info), GFP_KERNEL);
2034 "ipmi_si: Could not allocate SI data\n");
2038 info->addr_source = "SMBIOS";
2040 switch (ipmi_data->type) {
2041 case 0x01: /* KCS */
2042 info->si_type = SI_KCS;
2044 case 0x02: /* SMIC */
2045 info->si_type = SI_SMIC;
2048 info->si_type = SI_BT;
2055 switch (ipmi_data->addr_space) {
2056 case IPMI_MEM_ADDR_SPACE:
2057 info->io_setup = mem_setup;
2058 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2061 case IPMI_IO_ADDR_SPACE:
2062 info->io_setup = port_setup;
2063 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2069 "ipmi_si: Unknown SMBIOS I/O 0x%xAddress type: %d.\n" == 0 ? 0 : 0,
/* (line above is elided in this view; see full source) */
2070 ipmi_data->addr_space);
2073 info->io.addr_data = ipmi_data->base_addr;
2075 info->io.regspacing = ipmi_data->offset;
2076 if (!info->io.regspacing)
2077 info->io.regspacing = DEFAULT_REGSPACING;
2078 info->io.regsize = DEFAULT_REGSPACING;
2079 info->io.regshift = 0;
2081 info->slave_addr = ipmi_data->slave_addr;
2083 info->irq = ipmi_data->irq;
2085 info->irq_setup = std_irq_setup;
/*
 * Enumerate all DMI-declared IPMI devices and register each one:
 * decode the raw SMBIOS record, then feed the result to
 * try_init_dmi().
 */
2090 static void __devinit dmi_find_bmc(void)
2092 const struct dmi_device *dev = NULL;
2093 struct dmi_ipmi_data data;
2096 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2097 memset(&data, 0, sizeof(data));
2098 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2101 try_init_dmi(&data);
2104 #endif /* CONFIG_DMI */
/* PCI class codes/masks for ERMC (Embedded Remote Management
 * Controller) devices, plus HP's vendor/device IDs for the MMC card
 * and its config-space address word. */
2108 #define PCI_ERMC_CLASSCODE 0x0C0700
2109 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2110 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2111 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2112 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2113 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
2115 #define PCI_HP_VENDOR_ID 0x103C
2116 #define PCI_MMC_DEVICE_ID 0x121A
2117 #define PCI_MMC_ADDR_CW 0x10
/* addr_source_cleanup hook for PCI-discovered interfaces: disable the
 * PCI device stored in addr_source_data. */
2119 static void ipmi_pci_cleanup(struct smi_info *info)
2121 struct pci_dev *pdev = info->addr_source_data;
2123 pci_disable_device(pdev);
/*
 * PCI probe: derive the SI type from the low byte of the class code,
 * enable the device, choose port vs. memory access from BAR 0's
 * resource flags, and hand the populated smi_info to try_smi_init().
 * HP subsystem devices skip the first register (first_reg_offset --
 * set here but its consumer is not visible in this view).
 */
2126 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2127 const struct pci_device_id *ent)
2130 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2131 struct smi_info *info;
2132 int first_reg_offset = 0;
2134 info = kzalloc(sizeof(*info), GFP_KERNEL);
2138 info->addr_source = "PCI";
2140 switch (class_type) {
2141 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2142 info->si_type = SI_SMIC;
2145 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2146 info->si_type = SI_KCS;
2149 case PCI_ERMC_CLASSCODE_TYPE_BT:
2150 info->si_type = SI_BT;
2155 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2156 pci_name(pdev), class_type);
2160 rv = pci_enable_device(pdev);
2162 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2168 info->addr_source_cleanup = ipmi_pci_cleanup;
2169 info->addr_source_data = pdev;
2171 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2172 first_reg_offset = 1;
2174 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2175 info->io_setup = port_setup;
2176 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2178 info->io_setup = mem_setup;
2179 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2181 info->io.addr_data = pci_resource_start(pdev, 0);
2183 info->io.regspacing = DEFAULT_REGSPACING;
2184 info->io.regsize = DEFAULT_REGSPACING;
2185 info->io.regshift = 0;
2187 info->irq = pdev->irq;
2189 info->irq_setup = std_irq_setup;
2191 info->dev = &pdev->dev;
2192 pci_set_drvdata(pdev, info);
2194 return try_smi_init(info);
/* PCI remove: tear down the interface stashed in drvdata. */
2197 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2199 struct smi_info *info = pci_get_drvdata(pdev);
2200 cleanup_one_si(info);
/* Power-management stubs (bodies not visible here; presumably trivial
 * returns -- confirm against full source). */
2204 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2209 static int ipmi_pci_resume(struct pci_dev *pdev)
/* Match HP's MMC device explicitly plus anything in the ERMC class. */
2215 static struct pci_device_id ipmi_pci_devices[] = {
2216 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2217 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2220 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
/* PCI driver glue binding the probe/remove/PM callbacks above. */
2222 static struct pci_driver ipmi_pci_driver = {
2223 .name = DEVICE_NAME,
2224 .id_table = ipmi_pci_devices,
2225 .probe = ipmi_pci_probe,
2226 .remove = __devexit_p(ipmi_pci_remove),
2228 .suspend = ipmi_pci_suspend,
2229 .resume = ipmi_pci_resume,
2232 #endif /* CONFIG_PCI */
2235 #ifdef CONFIG_PPC_OF
/*
 * OpenFirmware/device-tree probe (PPC): read the register window from
 * the "reg" resource and optional "reg-size"/"reg-spacing"/"reg-shift"
 * properties (each must be a 4-byte cell if present), map the IRQ, and
 * register the interface.  The SI type comes from the match table's
 * .data field.
 */
2236 static int __devinit ipmi_of_probe(struct of_device *dev,
2237 const struct of_device_id *match)
2239 struct smi_info *info;
2240 struct resource resource;
2241 const int *regsize, *regspacing, *regshift;
2242 struct device_node *np = dev->node;
2246 dev_info(&dev->dev, PFX "probing via device tree\n");
2248 ret = of_address_to_resource(np, 0, &resource);
2250 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2254 regsize = of_get_property(np, "reg-size", &proplen);
2255 if (regsize && proplen != 4) {
2256 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2260 regspacing = of_get_property(np, "reg-spacing", &proplen);
2261 if (regspacing && proplen != 4) {
2262 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2266 regshift = of_get_property(np, "reg-shift", &proplen);
2267 if (regshift && proplen != 4) {
2268 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2272 info = kzalloc(sizeof(*info), GFP_KERNEL);
2276 PFX "could not allocate memory for OF probe\n");
2280 info->si_type = (enum si_type) match->data;
2281 info->addr_source = "device-tree";
2282 info->io_setup = mem_setup;
2283 info->irq_setup = std_irq_setup;
2285 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2286 info->io.addr_data = resource.start;
2288 info->io.regsize = regsize ? *regsize : DEFAULT_REGSIZE;
2289 info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
2290 info->io.regshift = regshift ? *regshift : 0;
2292 info->irq = irq_of_parse_and_map(dev->node, 0);
2293 info->dev = &dev->dev;
2295 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2296 info->io.addr_data, info->io.regsize, info->io.regspacing,
2299 dev->dev.driver_data = (void*) info;
2301 return try_smi_init(info);
/* OF remove: tear down the interface stashed in driver_data. */
2304 static int __devexit ipmi_of_remove(struct of_device *dev)
2306 cleanup_one_si(dev->dev.driver_data);
/* Device-tree match table; .data carries the SI type for the probe. */
2310 static struct of_device_id ipmi_match[] =
2312 { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS },
2313 { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2314 { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT },
/* OF platform driver glue for the match table and callbacks above. */
2318 static struct of_platform_driver ipmi_of_platform_driver =
2321 .match_table = ipmi_match,
2322 .probe = ipmi_of_probe,
2323 .remove = __devexit_p(ipmi_of_remove),
2325 #endif /* CONFIG_PPC_OF */
/*
 * Issue a Get Device ID command by driving the low-level SI state
 * machine synchronously (polling with schedule_timeout between
 * events).  Used during probing: if the state machine hoses, there is
 * probably no IPMI BMC at this address.  On success the response is
 * demangled into smi_info->device_id for later OEM fixups.
 */
2328 static int try_get_dev_id(struct smi_info *smi_info)
2330 unsigned char msg[2];
2331 unsigned char *resp;
2332 unsigned long resp_len;
2333 enum si_sm_result smi_result;
2336 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2340 /* Do a Get Device ID command, since it comes back with some
2342 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2343 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2344 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2346 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
/* Poll the state machine to completion; sleep 1 jiffy when it asks
 * for a delay, otherwise re-run it immediately. */
2349 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2350 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2351 schedule_timeout_uninterruptible(1);
2352 smi_result = smi_info->handlers->event(
2353 smi_info->si_sm, 100);
2355 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2357 smi_result = smi_info->handlers->event(
2358 smi_info->si_sm, 0);
2363 if (smi_result == SI_SM_HOSED) {
2364 /* We couldn't get the state machine to run, so whatever's at
2365 the port is probably not an IPMI SMI interface. */
2370 /* Otherwise, we got some data. */
2371 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2372 resp, IPMI_MAX_MSG_LENGTH);
2374 /* Check and record info from the get device id, in case we need it. */
2375 rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
/* /proc "type" file: print the interface type name (kcs/smic/bt). */
2382 static int type_file_read_proc(char *page, char **start, off_t off,
2383 int count, int *eof, void *data)
2385 struct smi_info *smi = data;
2387 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
/*
 * /proc "si_stats" file: dump one "name: value" line per driver
 * statistic counter, pulled from the atomic stats array via
 * smi_get_stat().
 */
2390 static int stat_file_read_proc(char *page, char **start, off_t off,
2391 int count, int *eof, void *data)
2393 char *out = (char *) page;
2394 struct smi_info *smi = data;
2396 out += sprintf(out, "interrupts_enabled: %d\n",
2397 smi->irq && !smi->interrupt_disabled);
2398 out += sprintf(out, "short_timeouts: %u\n",
2399 smi_get_stat(smi, short_timeouts));
2400 out += sprintf(out, "long_timeouts: %u\n",
2401 smi_get_stat(smi, long_timeouts));
2402 out += sprintf(out, "timeout_restarts: %u\n",
2403 smi_get_stat(smi, timeout_restarts));
2404 out += sprintf(out, "idles: %u\n",
2405 smi_get_stat(smi, idles));
2406 out += sprintf(out, "interrupts: %u\n",
2407 smi_get_stat(smi, interrupts));
2408 out += sprintf(out, "attentions: %u\n",
2409 smi_get_stat(smi, attentions));
2410 out += sprintf(out, "flag_fetches: %u\n",
2411 smi_get_stat(smi, flag_fetches));
2412 out += sprintf(out, "hosed_count: %u\n",
2413 smi_get_stat(smi, hosed_count));
2414 out += sprintf(out, "complete_transactions: %u\n",
2415 smi_get_stat(smi, complete_transactions));
2416 out += sprintf(out, "events: %u\n",
2417 smi_get_stat(smi, events));
2418 out += sprintf(out, "watchdog_pretimeouts: %u\n",
2419 smi_get_stat(smi, watchdog_pretimeouts));
2420 out += sprintf(out, "incoming_messages: %u\n",
2421 smi_get_stat(smi, incoming_messages));
/* /proc "params" file: print the interface configuration in the same
 * format accepted by the hotmod parameter. */
2426 static int param_read_proc(char *page, char **start, off_t off,
2427 int count, int *eof, void *data)
2429 struct smi_info *smi = data;
2431 return sprintf(page,
2432 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2433 si_to_str[smi->si_type],
2434 addr_space_to_str[smi->io.addr_type],
2444 * oem_data_avail_to_receive_msg_avail
2445 * @info - smi_info structure with msg_flags set
2447 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2448 * Returns 1 indicating need to re-run handle_flags().
/* Clear the OEM-data bits and substitute RECEIVE_MSG_AVAIL so generic
 * flag handling fetches the message (Dell workaround, see below). */
2450 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2452 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2458 * setup_dell_poweredge_oem_data_handler
2459 * @info - smi_info.device_id must be populated
2461 * Systems that match, but have firmware version < 1.40 may assert
2462 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2463 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2464 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2465 * as RECEIVE_MSG_AVAIL instead.
2467 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2468 * assert the OEM[012] bits, and if it did, the driver would have to
2469 * change to handle that properly, we don't actually check for the
2471 * Device ID = 0x20 BMC on PowerEdge 8G servers
2472 * Device Revision = 0x80
2473 * Firmware Revision1 = 0x01 BMC version 1.40
2474 * Firmware Revision2 = 0x40 BCD encoded
2475 * IPMI Version = 0x51 IPMI 1.5
2476 * Manufacturer ID = A2 02 00 Dell IANA
2478 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2479 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
2482 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2483 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2484 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2485 #define DELL_IANA_MFR_ID 0x0002a2
/* Install the OEM-flag workaround on matching Dell BMCs (8G servers
 * or any Dell BMC reporting IPMI < 1.5). */
2486 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2488 struct ipmi_device_id *id = &smi_info->device_id;
2489 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2490 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2491 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2492 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2493 smi_info->oem_data_avail_handler =
2494 oem_data_avail_to_receive_msg_avail;
2496 else if (ipmi_version_major(id) < 1 ||
2497 (ipmi_version_major(id) == 1 &&
2498 ipmi_version_minor(id) < 5)) {
2499 smi_info->oem_data_avail_handler =
2500 oem_data_avail_to_receive_msg_avail;
/*
 * Synthesize an error response (completion code 0xCA, "cannot return
 * requested length") for the current message and deliver it to the
 * upper layer, so userspace retries with a different buffer size.
 */
2505 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2506 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2508 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2510 /* Make it a response: response NetFN is request NetFN | 1,
 * i.e. data[0] | (1 << 2) in the lun-packed byte. */
2511 msg->rsp[0] = msg->data[0] | 4;
2512 msg->rsp[1] = msg->data[1];
2513 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2515 smi_info->curr_msg = NULL;
2516 deliver_recv_msg(smi_info, msg);
2520 * dell_poweredge_bt_xaction_handler
2521 * @info - smi_info.device_id must be populated
2523 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2524 * not respond to a Get SDR command if the length of the data
2525 * requested is exactly 0x3A, which leads to command timeouts and no
2526 * data returned. This intercepts such commands, and causes userspace
2527 * callers to try again with a different-sized buffer, which succeeds.
2530 #define STORAGE_NETFN 0x0A
2531 #define STORAGE_CMD_GET_SDR 0x23
/* Transaction-start notifier: intercept the problematic Get SDR
 * request and fail it immediately with return_hosed_msg_badsize(). */
2532 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2533 unsigned long unused,
2536 struct smi_info *smi_info = in;
2537 unsigned char *data = smi_info->curr_msg->data;
2538 unsigned int size = smi_info->curr_msg->data_size;
2540 (data[0]>>2) == STORAGE_NETFN &&
2541 data[1] == STORAGE_CMD_GET_SDR &&
2543 return_hosed_msg_badsize(smi_info);
/* Notifier block wrapping the Dell BT transaction workaround above. */
2549 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2550 .notifier_call = dell_poweredge_bt_xaction_handler,
2554 * setup_dell_poweredge_bt_xaction_handler
2555 * @info - smi_info.device_id must be filled in already
2557 * Fills in smi_info.device_id.start_transaction_pre_hook
2558 * when we know what function to use there.
/* Register the BT workaround notifier only for Dell BT interfaces. */
2561 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2563 struct ipmi_device_id *id = &smi_info->device_id;
2564 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2565 smi_info->si_type == SI_BT)
2566 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2570 * setup_oem_data_handler
2571 * @info - smi_info.device_id must be filled in already
2573 * Fills in smi_info.device_id.oem_data_available_handler
2574 * when we know what function to use there.
/* Dispatch point for all OEM data-available quirks (Dell only, now). */
2577 static void setup_oem_data_handler(struct smi_info *smi_info)
2579 setup_dell_poweredge_oem_data_handler(smi_info);
/* Dispatch point for all transaction-start quirks (Dell BT only). */
2582 static void setup_xaction_handlers(struct smi_info *smi_info)
2584 setup_dell_poweredge_bt_xaction_handler(smi_info);
/*
 * Stop the kipmid kernel thread (if running) and synchronously cancel
 * the SI timer.  Only meaningful once the interface has been started
 * and registered (smi_info->intf set).
 */
2587 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2589 if (smi_info->intf) {
2590 /* The timer and thread are only running if the
2591 interface has been started up and registered. */
2592 if (smi_info->thread != NULL)
2593 kthread_stop(smi_info->thread);
2594 del_timer_sync(&smi_info->si_timer);
/* Legacy default probe locations (type + I/O port) tried when no
 * interface was found any other way; list is port==0 terminated. */
2598 static __devinitdata struct ipmi_default_vals
2604 { .type = SI_KCS, .port = 0xca2 },
2605 { .type = SI_SMIC, .port = 0xca9 },
2606 { .type = SI_BT, .port = 0xe4 },
/*
 * Last-resort probing: try each entry of ipmi_defaults[] at its
 * well-known legacy I/O port (skipping ports claimed as legacy on
 * PPC), and stop at the first one that initializes successfully.
 */
2610 static __devinit void default_find_bmc(void)
2612 struct smi_info *info;
2615 for (i = 0; ; i++) {
2616 if (!ipmi_defaults[i].port)
2619 info = kzalloc(sizeof(*info), GFP_KERNEL);
2623 #ifdef CONFIG_PPC_MERGE
2624 if (check_legacy_ioport(ipmi_defaults[i].port))
2628 info->addr_source = NULL;
2630 info->si_type = ipmi_defaults[i].type;
2631 info->io_setup = port_setup;
2632 info->io.addr_data = ipmi_defaults[i].port;
2633 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2635 info->io.addr = NULL;
2636 info->io.regspacing = DEFAULT_REGSPACING;
2637 info->io.regsize = DEFAULT_REGSPACING;
2638 info->io.regshift = 0;
2640 if (try_smi_init(info) == 0) {
2642 printk(KERN_INFO "ipmi_si: Found default %s state"
2643 " machine at %s address 0x%lx\n",
2644 si_to_str[info->si_type],
2645 addr_space_to_str[info->io.addr_type],
2646 info->io.addr_data);
/*
 * Check the registered-interface list for an entry with the same
 * address space and address; used by try_smi_init() to reject
 * duplicate registrations.  Caller must hold smi_infos_lock.
 */
2652 static int is_new_interface(struct smi_info *info)
2656 list_for_each_entry(e, &smi_infos, link) {
2657 if (e->io.addr_type != info->io.addr_type)
2659 if (e->io.addr_data == info->io.addr_data)
/*
 * Bring up and register one detected system interface.
 *
 * Binds the per-type state-machine handlers (KCS/SMIC/BT), allocates
 * and initializes the low-level state machine, sets up its I/O space,
 * probes for a BMC with a Get Device ID command, registers with the
 * IPMI message handler and the driver core, and creates the
 * type/si_stats/params proc entries.  On success the interface is
 * added to smi_infos; on failure everything acquired so far is
 * unwound on the error paths at the bottom.
 *
 * NOTE(review): this is an elided excerpt -- some lines of the
 * original function (labels, braces, returns) are not visible here.
 */
2666 static int try_smi_init(struct smi_info *new_smi)
2671 if (new_smi->addr_source) {
2672 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2673 " machine at %s address 0x%lx, slave address 0x%x,"
2675 new_smi->addr_source,
2676 si_to_str[new_smi->si_type],
2677 addr_space_to_str[new_smi->io.addr_type],
2678 new_smi->io.addr_data,
2679 new_smi->slave_addr, new_smi->irq);
/* Reject an interface at an address we have already registered. */
2682 mutex_lock(&smi_infos_lock);
2683 if (!is_new_interface(new_smi)) {
2684 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2689 /* So we know not to free it unless we have allocated one. */
2690 new_smi->intf = NULL;
2691 new_smi->si_sm = NULL;
2692 new_smi->handlers = NULL;
/* Select the state-machine implementation for this interface type. */
2694 switch (new_smi->si_type) {
2696 new_smi->handlers = &kcs_smi_handlers;
2700 new_smi->handlers = &smic_smi_handlers;
2704 new_smi->handlers = &bt_smi_handlers;
2708 /* No support for anything else yet. */
2713 /* Allocate the state machine's data and initialize it. */
2714 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2715 if (!new_smi->si_sm) {
2716 printk(" Could not allocate state machine memory\n");
2720 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2723 /* Now that we know the I/O size, we can set up the I/O. */
2724 rv = new_smi->io_setup(new_smi);
2726 printk(" Could not set up I/O space\n");
2730 spin_lock_init(&(new_smi->si_lock));
2731 spin_lock_init(&(new_smi->msg_lock));
2733 /* Do low-level detection first. */
2734 if (new_smi->handlers->detect(new_smi->si_sm)) {
2735 if (new_smi->addr_source)
2736 printk(KERN_INFO "ipmi_si: Interface detection"
2742 /* Attempt a get device id command. If it fails, we probably
2743 don't have a BMC here. */
2744 rv = try_get_dev_id(new_smi);
2746 if (new_smi->addr_source)
2747 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2748 " at this location\n");
/* Hook up any BMC-specific OEM data and transaction handlers. */
2752 setup_oem_data_handler(new_smi);
2753 setup_xaction_handlers(new_smi);
/* Initialize the message queues, counters and statistics. */
2755 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2756 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2757 new_smi->curr_msg = NULL;
2758 atomic_set(&new_smi->req_events, 0);
2759 new_smi->run_to_completion = 0;
2760 for (i = 0; i < SI_NUM_STATS; i++)
2761 atomic_set(&new_smi->stats[i], 0);
2763 new_smi->interrupt_disabled = 0;
2764 atomic_set(&new_smi->stop_operation, 0);
2765 new_smi->intf_num = smi_num;
2768 /* Start clearing the flags before we enable interrupts or the
2769 timer to avoid racing with the timer. */
2770 start_clear_flags(new_smi);
2771 /* IRQ is defined to be set when non-zero. */
2773 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2775 if (!new_smi->dev) {
2776 /* If we don't already have a device from something
2777 * else (like PCI), then register a new one. */
2778 new_smi->pdev = platform_device_alloc("ipmi_si",
2783 " Unable to allocate platform device\n")
2786 new_smi->dev = &new_smi->pdev->dev;
2787 new_smi->dev->driver = &ipmi_driver;
2789 rv = platform_device_add(new_smi->pdev);
2793 " Unable to register system interface device:"
/* Remember we own the platform device so error paths unregister it. */
2798 new_smi->dev_registered = 1;
/* Register with the upper-layer IPMI message handler. */
2801 rv = ipmi_register_smi(&handlers,
2803 &new_smi->device_id,
2806 new_smi->slave_addr);
2809 "ipmi_si: Unable to register device: error %d\n",
2811 goto out_err_stop_timer;
/* Export type/si_stats/params through the interface's proc dir. */
2814 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2815 type_file_read_proc, NULL,
2816 new_smi, THIS_MODULE);
2819 "ipmi_si: Unable to create proc entry: %d\n",
2821 goto out_err_stop_timer;
2824 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2825 stat_file_read_proc, NULL,
2826 new_smi, THIS_MODULE);
2829 "ipmi_si: Unable to create proc entry: %d\n",
2831 goto out_err_stop_timer;
2834 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2835 param_read_proc, NULL,
2836 new_smi, THIS_MODULE);
2839 "ipmi_si: Unable to create proc entry: %d\n",
2841 goto out_err_stop_timer;
/* Success: publish the interface on the global list. */
2844 list_add_tail(&new_smi->link, &smi_infos);
2846 mutex_unlock(&smi_infos_lock);
2848 printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
/* Error unwind: stop timer/thread first, then undo in reverse order
   of acquisition. */
2853 atomic_inc(&new_smi->stop_operation);
2854 wait_for_timer_and_thread(new_smi);
2858 ipmi_unregister_smi(new_smi->intf);
2860 if (new_smi->irq_cleanup)
2861 new_smi->irq_cleanup(new_smi);
2863 /* Wait until we know that we are out of any interrupt
2864 handlers might have been running before we freed the
2866 synchronize_sched();
2868 if (new_smi->si_sm) {
2869 if (new_smi->handlers)
2870 new_smi->handlers->cleanup(new_smi->si_sm);
2871 kfree(new_smi->si_sm);
2873 if (new_smi->addr_source_cleanup)
2874 new_smi->addr_source_cleanup(new_smi);
2875 if (new_smi->io_cleanup)
2876 new_smi->io_cleanup(new_smi);
2878 if (new_smi->dev_registered)
2879 platform_device_unregister(new_smi->pdev);
2883 mutex_unlock(&smi_infos_lock);
/*
 * Module initialization: register the driver-model driver, parse the
 * si_type module parameter into its components, probe the configured
 * discovery sources (hardcoded parameters, PCI, and OF on PPC), and,
 * if nothing was found and si_trydefaults is set, try the default
 * addresses.  If unload_when_empty is set and still no interface
 * exists, everything is unregistered and an error is returned so the
 * module does not stay loaded uselessly.
 *
 * NOTE(review): elided excerpt -- the default-probe call and several
 * returns/braces are not visible here.
 */
2888 static __devinit int init_ipmi_si(void)
2898 /* Register the device drivers. */
2899 rv = driver_register(&ipmi_driver);
2902 "init_ipmi_si: Unable to register driver: %d\n",
2908 /* Parse out the si_type string into its components. */
2911 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2913 str = strchr(str, ',');
2923 printk(KERN_INFO "IPMI System Interface driver.\n");
/* Probe interfaces specified directly via module parameters. */
2925 hardcode_find_bmc();
2936 rv = pci_register_driver(&ipmi_pci_driver);
2939 "init_ipmi_si: Unable to register PCI driver: %d\n",
2944 #ifdef CONFIG_PPC_OF
2945 of_register_platform_driver(&ipmi_of_platform_driver);
/* Nothing found yet?  Optionally fall back to the default probes.
   The lock is dropped before probing; list_empty is only a hint. */
2948 if (si_trydefaults) {
2949 mutex_lock(&smi_infos_lock);
2950 if (list_empty(&smi_infos)) {
2951 /* No BMC was found, try defaults. */
2952 mutex_unlock(&smi_infos_lock);
2955 mutex_unlock(&smi_infos_lock);
/* Still empty and unload_when_empty: undo all registrations and bail. */
2959 mutex_lock(&smi_infos_lock);
2960 if (unload_when_empty && list_empty(&smi_infos)) {
2961 mutex_unlock(&smi_infos_lock);
2963 pci_unregister_driver(&ipmi_pci_driver);
2966 #ifdef CONFIG_PPC_OF
2967 of_unregister_platform_driver(&ipmi_of_platform_driver);
2969 driver_unregister(&ipmi_driver);
2970 printk("ipmi_si: Unable to find any System Interface(s)\n");
2973 mutex_unlock(&smi_infos_lock);
2977 module_init(init_ipmi_si);
/*
 * Tear down a single interface: unlink it from smi_infos, stop the
 * timer and kernel thread, drain any in-flight message, disable and
 * free the IRQ, unregister from the IPMI message handler, and release
 * the state-machine memory, address-source/I-O resources, and the
 * platform device.  Called with smi_infos_lock held (see
 * cleanup_ipmi_si, which walks the list while calling this).
 *
 * NOTE(review): elided excerpt -- early NULL checks and some braces
 * from the original are not visible here.
 */
2979 static void cleanup_one_si(struct smi_info *to_clean)
2982 unsigned long flags;
2987 list_del(&to_clean->link);
2989 /* Tell the driver that we are shutting down. */
2990 atomic_inc(&to_clean->stop_operation);
2992 /* Make sure the timer and thread are stopped and will not run
2994 wait_for_timer_and_thread(to_clean);
2996 /* Timeouts are stopped, now make sure the interrupts are off
2997 for the device. A little tricky with locks to make sure
2998 there are no races. */
2999 spin_lock_irqsave(&to_clean->si_lock, flags);
/* Poll (dropping the lock each pass) until no message is in flight
   and the state machine has returned to idle. */
3000 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3001 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3003 schedule_timeout_uninterruptible(1);
3004 spin_lock_irqsave(&to_clean->si_lock, flags);
/* Now idle under the lock: safe to tell the BMC to stop interrupting. */
3006 disable_si_irq(to_clean);
3007 spin_unlock_irqrestore(&to_clean->si_lock, flags);
/* disable_si_irq may itself have kicked off a transaction; wait again. */
3008 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3010 schedule_timeout_uninterruptible(1);
3013 /* Clean up interrupts and make sure that everything is done. */
3014 if (to_clean->irq_cleanup)
3015 to_clean->irq_cleanup(to_clean);
3016 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3018 schedule_timeout_uninterruptible(1);
3021 rv = ipmi_unregister_smi(to_clean->intf);
3024 "ipmi_si: Unable to unregister device: errno=%d\n",
/* Release resources in reverse order of acquisition. */
3028 to_clean->handlers->cleanup(to_clean->si_sm);
3030 kfree(to_clean->si_sm);
3032 if (to_clean->addr_source_cleanup)
3033 to_clean->addr_source_cleanup(to_clean);
3034 if (to_clean->io_cleanup)
3035 to_clean->io_cleanup(to_clean);
3037 if (to_clean->dev_registered)
3038 platform_device_unregister(to_clean->pdev);
/*
 * Module exit: unregister the bus drivers (PCI, and OF on PPC), tear
 * down every remaining interface on smi_infos under smi_infos_lock,
 * then drop the driver-model registration.
 */
3043 static __exit void cleanup_ipmi_si(void)
3045 struct smi_info *e, *tmp_e;
3051 pci_unregister_driver(&ipmi_pci_driver);
3054 #ifdef CONFIG_PPC_OF
3055 of_unregister_platform_driver(&ipmi_of_platform_driver);
/* _safe variant: cleanup_one_si unlinks each entry as we walk. */
3058 mutex_lock(&smi_infos_lock);
3059 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3061 mutex_unlock(&smi_infos_lock);
3063 driver_unregister(&ipmi_driver);
3065 module_exit(cleanup_ipmi_si);
3067 MODULE_LICENSE("GPL");
3068 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3069 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");