drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <asm/system.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi_smi.h>
61 #include <asm/io.h>
62 #include "ipmi_si_sm.h"
63 #include <linux/init.h>
64 #include <linux/dmi.h>
65 #include <linux/string.h>
66 #include <linux/ctype.h>
67
68 #ifdef CONFIG_PPC_OF
69 #include <asm/of_device.h>
70 #include <asm/of_platform.h>
71 #endif
72
73 #define PFX "ipmi_si: "
74
75 /* Measure times between events in the driver. */
76 #undef DEBUG_TIMING
77
78 /* Call every 10 ms. */
79 #define SI_TIMEOUT_TIME_USEC    10000
80 #define SI_USEC_PER_JIFFY       (1000000/HZ)
81 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
82 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
83                                        short timeout */
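/*
 * For illustration (the actual values depend on the kernel's HZ): with
 * HZ=1000, SI_USEC_PER_JIFFY is 1000000/1000 = 1000, so
 * SI_TIMEOUT_JIFFIES is 10000/1000 = 10 jiffies, i.e. the long-poll
 * timer fires roughly every 10 ms.
 */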
84
85 /* Bit for BMC global enables. */
86 #define IPMI_BMC_RCV_MSG_INTR     0x01
87 #define IPMI_BMC_EVT_MSG_INTR     0x02
88 #define IPMI_BMC_EVT_MSG_BUFF     0x04
89 #define IPMI_BMC_SYS_LOG          0x08
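/*
 * These are bit positions in the enables byte carried by the Get/Set
 * BMC Global Enables commands; the SI_ENABLE_INTERRUPTS1/2 states below
 * OR the first two into the BMC's current enables to turn on message
 * and event interrupts.
 */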
90
91 enum si_intf_state {
92         SI_NORMAL,
93         SI_GETTING_FLAGS,
94         SI_GETTING_EVENTS,
95         SI_CLEARING_FLAGS,
96         SI_CLEARING_FLAGS_THEN_SET_IRQ,
97         SI_GETTING_MESSAGES,
98         SI_ENABLE_INTERRUPTS1,
99         SI_ENABLE_INTERRUPTS2,
100         SI_DISABLE_INTERRUPTS1,
101         SI_DISABLE_INTERRUPTS2
102         /* FIXME - add watchdog stuff. */
103 };
104
105 /* Some BT-specific defines we need here. */
106 #define IPMI_BT_INTMASK_REG             2
107 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
108 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
109
110 enum si_type {
111     SI_KCS, SI_SMIC, SI_BT
112 };
113 static char *si_to_str[] = { "kcs", "smic", "bt" };
114
115 #define DEVICE_NAME "ipmi_si"
116
117 static struct device_driver ipmi_driver =
118 {
119         .name = DEVICE_NAME,
120         .bus = &platform_bus_type
121 };
122
123
124 /*
125  * Indexes into stats[] in smi_info below.
126  */
127
128 #define SI_STAT_short_timeouts          0
129 #define SI_STAT_long_timeouts           1
130 #define SI_STAT_timeout_restarts        2
131 #define SI_STAT_idles                   3
132 #define SI_STAT_interrupts              4
133 #define SI_STAT_attentions              5
134 #define SI_STAT_flag_fetches            6
135 #define SI_STAT_hosed_count             7
136 #define SI_STAT_complete_transactions   8
137 #define SI_STAT_events                  9
138 #define SI_STAT_watchdog_pretimeouts    10
139 #define SI_STAT_incoming_messages       11
140
141 /* If you add a stat, you must update this value. */
142 #define SI_NUM_STATS                    12
143
144 struct smi_info
145 {
146         int                    intf_num;
147         ipmi_smi_t             intf;
148         struct si_sm_data      *si_sm;
149         struct si_sm_handlers  *handlers;
150         enum si_type           si_type;
151         spinlock_t             si_lock;
152         spinlock_t             msg_lock;
153         struct list_head       xmit_msgs;
154         struct list_head       hp_xmit_msgs;
155         struct ipmi_smi_msg    *curr_msg;
156         enum si_intf_state     si_state;
157
158         /* Used to handle the various types of I/O that can occur with
159            IPMI */
160         struct si_sm_io io;
161         int (*io_setup)(struct smi_info *info);
162         void (*io_cleanup)(struct smi_info *info);
163         int (*irq_setup)(struct smi_info *info);
164         void (*irq_cleanup)(struct smi_info *info);
165         unsigned int io_size;
166         char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
167         void (*addr_source_cleanup)(struct smi_info *info);
168         void *addr_source_data;
169
170         /* Per-OEM handler, called from handle_flags().
171            Returns 1 when handle_flags() needs to be re-run
172            or 0 indicating it set si_state itself.
173         */
174         int (*oem_data_avail_handler)(struct smi_info *smi_info);
175
176         /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
177            is set to hold the flags until we are done handling everything
178            from the flags. */
179 #define RECEIVE_MSG_AVAIL       0x01
180 #define EVENT_MSG_BUFFER_FULL   0x02
181 #define WDT_PRE_TIMEOUT_INT     0x08
182 #define OEM0_DATA_AVAIL     0x20
183 #define OEM1_DATA_AVAIL     0x40
184 #define OEM2_DATA_AVAIL     0x80
185 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
186                              OEM1_DATA_AVAIL | \
187                              OEM2_DATA_AVAIL)
188         unsigned char       msg_flags;
189
190         /* If set to true, this will request events the next time the
191            state machine is idle. */
192         atomic_t            req_events;
193
194         /* If true, run the state machine to completion on every send
195            call.  Generally used after a panic to make sure stuff goes
196            out. */
197         int                 run_to_completion;
198
199         /* The I/O port of an SI interface. */
200         int                 port;
201
202         /* The space between start addresses of the two ports.  For
203            instance, if the first port is 0xca2 and the spacing is 4, then
204            the second port is 0xca6. */
205         unsigned int        spacing;
206
207         /* zero if no irq; */
208         int                 irq;
209
210         /* The timer for this si. */
211         struct timer_list   si_timer;
212
213         /* The time (in jiffies) the last timeout occurred at. */
214         unsigned long       last_timeout_jiffies;
215
216         /* Used to gracefully stop the timer without race conditions. */
217         atomic_t            stop_operation;
218
219         /* The driver will disable interrupts when it gets into a
220            situation where it cannot handle messages due to lack of
221            memory.  Once that situation clears up, it will re-enable
222            interrupts. */
223         int interrupt_disabled;
224
225         /* From the get device id response... */
226         struct ipmi_device_id device_id;
227
228         /* Driver model stuff. */
229         struct device *dev;
230         struct platform_device *pdev;
231
232          /* True if we allocated the device, false if it came from
233           * someplace else (like PCI). */
234         int dev_registered;
235
236         /* Slave address, could be reported from DMI. */
237         unsigned char slave_addr;
238
239         /* Counters and things for the proc filesystem. */
240         atomic_t stats[SI_NUM_STATS];
241
242         struct task_struct *thread;
243
244         struct list_head link;
245 };
246
247 #define smi_inc_stat(smi, stat) \
248         atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
249 #define smi_get_stat(smi, stat) \
250         ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
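/*
 * The ## token pasting builds the index name, e.g.
 * smi_inc_stat(smi_info, short_timeouts) expands to
 * atomic_inc(&(smi_info)->stats[SI_STAT_short_timeouts]).
 */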
251
252 #define SI_MAX_PARMS 4
253
254 static int force_kipmid[SI_MAX_PARMS];
255 static int num_force_kipmid;
256
257 static int unload_when_empty = 1;
258
259 static int try_smi_init(struct smi_info *smi);
260 static void cleanup_one_si(struct smi_info *to_clean);
261
262 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
263 static int register_xaction_notifier(struct notifier_block * nb)
264 {
265         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
266 }
267
268 static void deliver_recv_msg(struct smi_info *smi_info,
269                              struct ipmi_smi_msg *msg)
270 {
271         /* Deliver the message to the upper layer with the lock
272            released. */
273         spin_unlock(&(smi_info->si_lock));
274         ipmi_smi_msg_received(smi_info->intf, msg);
275         spin_lock(&(smi_info->si_lock));
276 }
277
278 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
279 {
280         struct ipmi_smi_msg *msg = smi_info->curr_msg;
281
282         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
283                 cCode = IPMI_ERR_UNSPECIFIED;
284         /* else use it as is */
285
286         /* Make it a response */
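        /*
         * A sketch of why OR-ing with 4 works: data[0] carries the netfn
         * in its upper six bits (netfn << 2), and a response netfn is the
         * request netfn plus one, so e.g. (IPMI_NETFN_APP_REQUEST << 2) =
         * 0x18 becomes 0x1c, which is IPMI_NETFN_APP_RESPONSE << 2.
         */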
287         msg->rsp[0] = msg->data[0] | 4;
288         msg->rsp[1] = msg->data[1];
289         msg->rsp[2] = cCode;
290         msg->rsp_size = 3;
291
292         smi_info->curr_msg = NULL;
293         deliver_recv_msg(smi_info, msg);
294 }
295
296 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
297 {
298         int              rv;
299         struct list_head *entry = NULL;
300 #ifdef DEBUG_TIMING
301         struct timeval t;
302 #endif
303
304         /* No need to save flags, we already have interrupts off and we
305            already hold the SMI lock. */
306         if (!smi_info->run_to_completion)
307                 spin_lock(&(smi_info->msg_lock));
308
309         /* Pick the high priority queue first. */
310         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
311                 entry = smi_info->hp_xmit_msgs.next;
312         } else if (!list_empty(&(smi_info->xmit_msgs))) {
313                 entry = smi_info->xmit_msgs.next;
314         }
315
316         if (!entry) {
317                 smi_info->curr_msg = NULL;
318                 rv = SI_SM_IDLE;
319         } else {
320                 int err;
321
322                 list_del(entry);
323                 smi_info->curr_msg = list_entry(entry,
324                                                 struct ipmi_smi_msg,
325                                                 link);
326 #ifdef DEBUG_TIMING
327                 do_gettimeofday(&t);
328                 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
329 #endif
330                 err = atomic_notifier_call_chain(&xaction_notifier_list,
331                                 0, smi_info);
332                 if (err & NOTIFY_STOP_MASK) {
333                         rv = SI_SM_CALL_WITHOUT_DELAY;
334                         goto out;
335                 }
336                 err = smi_info->handlers->start_transaction(
337                         smi_info->si_sm,
338                         smi_info->curr_msg->data,
339                         smi_info->curr_msg->data_size);
340                 if (err) {
341                         return_hosed_msg(smi_info, err);
342                 }
343
344                 rv = SI_SM_CALL_WITHOUT_DELAY;
345         }
346         out:
347         if (!smi_info->run_to_completion)
348                 spin_unlock(&(smi_info->msg_lock));
349
350         return rv;
351 }
352
353 static void start_enable_irq(struct smi_info *smi_info)
354 {
355         unsigned char msg[2];
356
357         /* If we are enabling interrupts, we have to tell the
358            BMC to use them. */
359         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
360         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
361
362         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
363         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
364 }
365
366 static void start_disable_irq(struct smi_info *smi_info)
367 {
368         unsigned char msg[2];
369
370         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
371         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
372
373         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
374         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
375 }
376
377 static void start_clear_flags(struct smi_info *smi_info)
378 {
379         unsigned char msg[3];
380
381         /* Make sure the watchdog pre-timeout flag is not set at startup. */
382         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
383         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
384         msg[2] = WDT_PRE_TIMEOUT_INT;
385
386         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
387         smi_info->si_state = SI_CLEARING_FLAGS;
388 }
389
390 /* When we have a situation where we run out of memory and cannot
391    allocate messages, we just leave them in the BMC and run the system
392    polled until we can allocate some memory.  Once we have some
393    memory, we will re-enable the interrupt. */
394 static inline void disable_si_irq(struct smi_info *smi_info)
395 {
396         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
397                 start_disable_irq(smi_info);
398                 smi_info->interrupt_disabled = 1;
399         }
400 }
401
402 static inline void enable_si_irq(struct smi_info *smi_info)
403 {
404         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
405                 start_enable_irq(smi_info);
406                 smi_info->interrupt_disabled = 0;
407         }
408 }
409
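/*
 * Process the flags reported by the BMC in priority order: watchdog
 * pre-timeout first, then queued receive messages, then the event
 * buffer, and finally any OEM-specific data handled by the per-OEM
 * handler.
 */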
410 static void handle_flags(struct smi_info *smi_info)
411 {
412  retry:
413         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
414                 /* Watchdog pre-timeout */
415                 smi_inc_stat(smi_info, watchdog_pretimeouts);
416
417                 start_clear_flags(smi_info);
418                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
419                 spin_unlock(&(smi_info->si_lock));
420                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
421                 spin_lock(&(smi_info->si_lock));
422         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
423                 /* Messages available. */
424                 smi_info->curr_msg = ipmi_alloc_smi_msg();
425                 if (!smi_info->curr_msg) {
426                         disable_si_irq(smi_info);
427                         smi_info->si_state = SI_NORMAL;
428                         return;
429                 }
430                 enable_si_irq(smi_info);
431
432                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
433                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
434                 smi_info->curr_msg->data_size = 2;
435
436                 smi_info->handlers->start_transaction(
437                         smi_info->si_sm,
438                         smi_info->curr_msg->data,
439                         smi_info->curr_msg->data_size);
440                 smi_info->si_state = SI_GETTING_MESSAGES;
441         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
442                 /* Events available. */
443                 smi_info->curr_msg = ipmi_alloc_smi_msg();
444                 if (!smi_info->curr_msg) {
445                         disable_si_irq(smi_info);
446                         smi_info->si_state = SI_NORMAL;
447                         return;
448                 }
449                 enable_si_irq(smi_info);
450
451                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
452                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
453                 smi_info->curr_msg->data_size = 2;
454
455                 smi_info->handlers->start_transaction(
456                         smi_info->si_sm,
457                         smi_info->curr_msg->data,
458                         smi_info->curr_msg->data_size);
459                 smi_info->si_state = SI_GETTING_EVENTS;
460         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
461                    smi_info->oem_data_avail_handler) {
462                 if (smi_info->oem_data_avail_handler(smi_info))
463                         goto retry;
464         } else {
465                 smi_info->si_state = SI_NORMAL;
466         }
467 }
468
469 static void handle_transaction_done(struct smi_info *smi_info)
470 {
471         struct ipmi_smi_msg *msg;
472 #ifdef DEBUG_TIMING
473         struct timeval t;
474
475         do_gettimeofday(&t);
476         printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
477 #endif
478         switch (smi_info->si_state) {
479         case SI_NORMAL:
480                 if (!smi_info->curr_msg)
481                         break;
482
483                 smi_info->curr_msg->rsp_size
484                         = smi_info->handlers->get_result(
485                                 smi_info->si_sm,
486                                 smi_info->curr_msg->rsp,
487                                 IPMI_MAX_MSG_LENGTH);
488
489                 /* Do this here because deliver_recv_msg() releases the
490                    lock, and a new message can be put in during the
491                    time the lock is released. */
492                 msg = smi_info->curr_msg;
493                 smi_info->curr_msg = NULL;
494                 deliver_recv_msg(smi_info, msg);
495                 break;
496
497         case SI_GETTING_FLAGS:
498         {
499                 unsigned char msg[4];
500                 unsigned int  len;
501
502                 /* We got the flags from the SMI, now handle them. */
503                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
504                 if (msg[2] != 0) {
505                         /* Error fetching flags, just give up for
506                            now. */
507                         smi_info->si_state = SI_NORMAL;
508                 } else if (len < 4) {
509                         /* Hmm, no flags.  That's technically illegal, but
510                            don't use uninitialized data. */
511                         smi_info->si_state = SI_NORMAL;
512                 } else {
513                         smi_info->msg_flags = msg[3];
514                         handle_flags(smi_info);
515                 }
516                 break;
517         }
518
519         case SI_CLEARING_FLAGS:
520         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
521         {
522                 unsigned char msg[3];
523
524                 /* We cleared the flags. */
525                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
526                 if (msg[2] != 0) {
527                         /* Error clearing flags */
528                         printk(KERN_WARNING
529                                "ipmi_si: Error clearing flags: %2.2x\n",
530                                msg[2]);
531                 }
532                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
533                         start_enable_irq(smi_info);
534                 else
535                         smi_info->si_state = SI_NORMAL;
536                 break;
537         }
538
539         case SI_GETTING_EVENTS:
540         {
541                 smi_info->curr_msg->rsp_size
542                         = smi_info->handlers->get_result(
543                                 smi_info->si_sm,
544                                 smi_info->curr_msg->rsp,
545                                 IPMI_MAX_MSG_LENGTH);
546
547                 /* Do this here because deliver_recv_msg() releases the
548                    lock, and a new message can be put in during the
549                    time the lock is released. */
550                 msg = smi_info->curr_msg;
551                 smi_info->curr_msg = NULL;
552                 if (msg->rsp[2] != 0) {
553                         /* Error getting event, probably done. */
554                         msg->done(msg);
555
556                         /* Take off the event flag. */
557                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
558                         handle_flags(smi_info);
559                 } else {
560                         smi_inc_stat(smi_info, events);
561
562                         /* Do this before we deliver the message
563                            because delivering the message releases the
564                            lock and something else can mess with the
565                            state. */
566                         handle_flags(smi_info);
567
568                         deliver_recv_msg(smi_info, msg);
569                 }
570                 break;
571         }
572
573         case SI_GETTING_MESSAGES:
574         {
575                 smi_info->curr_msg->rsp_size
576                         = smi_info->handlers->get_result(
577                                 smi_info->si_sm,
578                                 smi_info->curr_msg->rsp,
579                                 IPMI_MAX_MSG_LENGTH);
580
581                 /* Do this here because deliver_recv_msg() releases the
582                    lock, and a new message can be put in during the
583                    time the lock is released. */
584                 msg = smi_info->curr_msg;
585                 smi_info->curr_msg = NULL;
586                 if (msg->rsp[2] != 0) {
587                         /* Error getting message, probably done. */
588                         msg->done(msg);
589
590                         /* Take off the msg flag. */
591                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
592                         handle_flags(smi_info);
593                 } else {
594                         smi_inc_stat(smi_info, incoming_messages);
595
596                         /* Do this before we deliver the message
597                            because delivering the message releases the
598                            lock and something else can mess with the
599                            state. */
600                         handle_flags(smi_info);
601
602                         deliver_recv_msg(smi_info, msg);
603                 }
604                 break;
605         }
606
607         case SI_ENABLE_INTERRUPTS1:
608         {
609                 unsigned char msg[4];
610
611                 /* We got the flags from the SMI, now handle them. */
612                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
613                 if (msg[2] != 0) {
614                         printk(KERN_WARNING
615                                "ipmi_si: Could not enable interrupts"
616                                ", failed get, using polled mode.\n");
617                         smi_info->si_state = SI_NORMAL;
618                 } else {
619                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
620                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
621                         msg[2] = (msg[3] |
622                                   IPMI_BMC_RCV_MSG_INTR |
623                                   IPMI_BMC_EVT_MSG_INTR);
624                         smi_info->handlers->start_transaction(
625                                 smi_info->si_sm, msg, 3);
626                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
627                 }
628                 break;
629         }
630
631         case SI_ENABLE_INTERRUPTS2:
632         {
633                 unsigned char msg[4];
634
635                 /* We got the flags from the SMI, now handle them. */
636                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
637                 if (msg[2] != 0) {
638                         printk(KERN_WARNING
639                                "ipmi_si: Could not enable interrupts"
640                                ", failed set, using polled mode.\n");
641                 }
642                 smi_info->si_state = SI_NORMAL;
643                 break;
644         }
645
646         case SI_DISABLE_INTERRUPTS1:
647         {
648                 unsigned char msg[4];
649
650                 /* We got the flags from the SMI, now handle them. */
651                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
652                 if (msg[2] != 0) {
653                         printk(KERN_WARNING
654                                "ipmi_si: Could not disable interrupts"
655                                ", failed get.\n");
656                         smi_info->si_state = SI_NORMAL;
657                 } else {
658                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
659                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
660                         msg[2] = (msg[3] &
661                                   ~(IPMI_BMC_RCV_MSG_INTR |
662                                     IPMI_BMC_EVT_MSG_INTR));
663                         smi_info->handlers->start_transaction(
664                                 smi_info->si_sm, msg, 3);
665                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
666                 }
667                 break;
668         }
669
670         case SI_DISABLE_INTERRUPTS2:
671         {
672                 unsigned char msg[4];
673
674                 /* We got the flags from the SMI, now handle them. */
675                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
676                 if (msg[2] != 0) {
677                         printk(KERN_WARNING
678                                "ipmi_si: Could not disable interrupts"
679                                ", failed set.\n");
680                 }
681                 smi_info->si_state = SI_NORMAL;
682                 break;
683         }
684         }
685 }
686
687 /* Called on timeouts and events.  Timeouts should pass the elapsed
688    time, interrupts should pass in zero.  Must be called with
689    si_lock held and interrupts disabled. */
690 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
691                                            int time)
692 {
693         enum si_sm_result si_sm_result;
694
695  restart:
696         /* There used to be a loop here that waited a little while
697            (around 25us) before giving up.  That turned out to be
698            pointless; the minimum delays I was seeing were in the 300us
699            range, which is far too long to wait in an interrupt.  So
700            we just run until the state machine tells us something
701            happened or it needs a delay. */
702         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
703         time = 0;
704         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
705         {
706                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
707         }
708
709         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
710         {
711                 smi_inc_stat(smi_info, complete_transactions);
712
713                 handle_transaction_done(smi_info);
714                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
715         }
716         else if (si_sm_result == SI_SM_HOSED)
717         {
718                 smi_inc_stat(smi_info, hosed_count);
719
720                 /* Do this before return_hosed_msg(), because that
721                    releases the lock. */
722                 smi_info->si_state = SI_NORMAL;
723                 if (smi_info->curr_msg != NULL) {
724                         /* If we were handling a user message, format
725                            a response to send to the upper layer to
726                            tell it about the error. */
727                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
728                 }
729                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
730         }
731
732         /*
733          * We prefer handling attn over new messages.  But don't do
734          * this if there is not yet an upper layer to handle anything.
735          */
736         if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN)
737         {
738                 unsigned char msg[2];
739
740                 smi_inc_stat(smi_info, attentions);
741
742                 /* Got an attn, send down a get message flags to see
743                    what's causing it.  It would be better to handle
744                    this in the upper layer, but due to the way
745                    interrupts work with the SMI, that's not really
746                    possible. */
747                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
748                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
749
750                 smi_info->handlers->start_transaction(
751                         smi_info->si_sm, msg, 2);
752                 smi_info->si_state = SI_GETTING_FLAGS;
753                 goto restart;
754         }
755
756         /* If we are currently idle, try to start the next message. */
757         if (si_sm_result == SI_SM_IDLE) {
758                 smi_inc_stat(smi_info, idles);
759
760                 si_sm_result = start_next_msg(smi_info);
761                 if (si_sm_result != SI_SM_IDLE)
762                         goto restart;
763         }
764
765         if ((si_sm_result == SI_SM_IDLE)
766             && (atomic_read(&smi_info->req_events)))
767         {
768                 /* We are idle and the upper layer requested that I fetch
769                    events, so do so. */
770                 atomic_set(&smi_info->req_events, 0);
771
772                 smi_info->curr_msg = ipmi_alloc_smi_msg();
773                 if (!smi_info->curr_msg)
774                         goto out;
775
776                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
777                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
778                 smi_info->curr_msg->data_size = 2;
779
780                 smi_info->handlers->start_transaction(
781                         smi_info->si_sm,
782                         smi_info->curr_msg->data,
783                         smi_info->curr_msg->data_size);
784                 smi_info->si_state = SI_GETTING_EVENTS;
785                 goto restart;
786         }
787  out:
788         return si_sm_result;
789 }
790
791 static void sender(void                *send_info,
792                    struct ipmi_smi_msg *msg,
793                    int                 priority)
794 {
795         struct smi_info   *smi_info = send_info;
796         enum si_sm_result result;
797         unsigned long     flags;
798 #ifdef DEBUG_TIMING
799         struct timeval    t;
800 #endif
801
802         if (atomic_read(&smi_info->stop_operation)) {
803                 msg->rsp[0] = msg->data[0] | 4;
804                 msg->rsp[1] = msg->data[1];
805                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
806                 msg->rsp_size = 3;
807                 deliver_recv_msg(smi_info, msg);
808                 return;
809         }
810
811 #ifdef DEBUG_TIMING
812         do_gettimeofday(&t);
813         printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
814 #endif
815
816         if (smi_info->run_to_completion) {
817                 /*
818                  * If we are running to completion, then throw it in
819                  * the list and run transactions until everything is
820                  * clear.  Priority doesn't matter here.
821                  */
822
823                 /*
824                  * Run to completion means we are single-threaded, no
825                  * need for locks.
826                  */
827                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
828
829                 result = smi_event_handler(smi_info, 0);
830                 while (result != SI_SM_IDLE) {
831                         udelay(SI_SHORT_TIMEOUT_USEC);
832                         result = smi_event_handler(smi_info,
833                                                    SI_SHORT_TIMEOUT_USEC);
834                 }
835                 return;
836         }
837
838         spin_lock_irqsave(&smi_info->msg_lock, flags);
839         if (priority > 0)
840                 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
841         else
842                 list_add_tail(&msg->link, &smi_info->xmit_msgs);
843         spin_unlock_irqrestore(&smi_info->msg_lock, flags);
844
845         spin_lock_irqsave(&smi_info->si_lock, flags);
846         if ((smi_info->si_state == SI_NORMAL)
847             && (smi_info->curr_msg == NULL))
848         {
849                 start_next_msg(smi_info);
850         }
851         spin_unlock_irqrestore(&smi_info->si_lock, flags);
852 }
853
854 static void set_run_to_completion(void *send_info, int i_run_to_completion)
855 {
856         struct smi_info   *smi_info = send_info;
857         enum si_sm_result result;
858
859         smi_info->run_to_completion = i_run_to_completion;
860         if (i_run_to_completion) {
861                 result = smi_event_handler(smi_info, 0);
862                 while (result != SI_SM_IDLE) {
863                         udelay(SI_SHORT_TIMEOUT_USEC);
864                         result = smi_event_handler(smi_info,
865                                                    SI_SHORT_TIMEOUT_USEC);
866                 }
867         }
868 }
869
870 static int ipmi_thread(void *data)
871 {
872         struct smi_info *smi_info = data;
873         unsigned long flags;
874         enum si_sm_result smi_result;
875
876         set_user_nice(current, 19);
877         while (!kthread_should_stop()) {
878                 spin_lock_irqsave(&(smi_info->si_lock), flags);
879                 smi_result = smi_event_handler(smi_info, 0);
880                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
881                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
882                         /* do nothing */
883                 }
884                 else if (smi_result == SI_SM_CALL_WITH_DELAY)
885                         schedule();
886                 else
887                         schedule_timeout_interruptible(1);
888         }
889         return 0;
890 }
891
892
893 static void poll(void *send_info)
894 {
895         struct smi_info *smi_info = send_info;
896         unsigned long flags;
897
898         /*
899          * Make sure there is some delay in the poll loop so we can
900          * drive time forward and timeout things.
901          */
902         udelay(10);
903         spin_lock_irqsave(&smi_info->si_lock, flags);
904         smi_event_handler(smi_info, 10);
905         spin_unlock_irqrestore(&smi_info->si_lock, flags);
906 }
907
908 static void request_events(void *send_info)
909 {
910         struct smi_info *smi_info = send_info;
911
912         if (atomic_read(&smi_info->stop_operation))
913                 return;
914
915         atomic_set(&smi_info->req_events, 1);
916 }
917
918 static int initialized;
919
920 static void smi_timeout(unsigned long data)
921 {
922         struct smi_info   *smi_info = (struct smi_info *) data;
923         enum si_sm_result smi_result;
924         unsigned long     flags;
925         unsigned long     jiffies_now;
926         long              time_diff;
927 #ifdef DEBUG_TIMING
928         struct timeval    t;
929 #endif
930
931         spin_lock_irqsave(&(smi_info->si_lock), flags);
932 #ifdef DEBUG_TIMING
933         do_gettimeofday(&t);
934         printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
935 #endif
936         jiffies_now = jiffies;
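        /* Convert the jiffies elapsed since the last timer run into
           microseconds; the state machine advances its timeouts by
           this amount. */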
937         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
938                      * SI_USEC_PER_JIFFY);
939         smi_result = smi_event_handler(smi_info, time_diff);
940
941         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
942
943         smi_info->last_timeout_jiffies = jiffies_now;
944
945         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
946                 /* Running with interrupts, only do long timeouts. */
947                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
948                 smi_inc_stat(smi_info, long_timeouts);
949                 goto do_add_timer;
950         }
951
952         /* If the state machine asks for a short delay, then shorten
953            the timer timeout. */
954         if (smi_result == SI_SM_CALL_WITH_DELAY) {
955                 smi_inc_stat(smi_info, short_timeouts);
956                 smi_info->si_timer.expires = jiffies + 1;
957         } else {
958                 smi_inc_stat(smi_info, long_timeouts);
959                 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
960         }
961
962  do_add_timer:
963         add_timer(&(smi_info->si_timer));
964 }
965
966 static irqreturn_t si_irq_handler(int irq, void *data)
967 {
968         struct smi_info *smi_info = data;
969         unsigned long   flags;
970 #ifdef DEBUG_TIMING
971         struct timeval  t;
972 #endif
973
974         spin_lock_irqsave(&(smi_info->si_lock), flags);
975
976         smi_inc_stat(smi_info, interrupts);
977
978 #ifdef DEBUG_TIMING
979         do_gettimeofday(&t);
980         printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
981 #endif
982         smi_event_handler(smi_info, 0);
983         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
984         return IRQ_HANDLED;
985 }
986
987 static irqreturn_t si_bt_irq_handler(int irq, void *data)
988 {
989         struct smi_info *smi_info = data;
990         /* We need to clear the IRQ flag for the BT interface. */
991         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
992                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
993                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
994         return si_irq_handler(irq, data);
995 }
996
997 static int smi_start_processing(void       *send_info,
998                                 ipmi_smi_t intf)
999 {
1000         struct smi_info *new_smi = send_info;
1001         int             enable = 0;
1002
1003         new_smi->intf = intf;
1004
1005         /* Try to claim any interrupts. */
1006         if (new_smi->irq_setup)
1007                 new_smi->irq_setup(new_smi);
1008
1009         /* Set up the timer that drives the interface. */
1010         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1011         new_smi->last_timeout_jiffies = jiffies;
1012         mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1013
1014         /*
1015          * Check if the user forcefully enabled the daemon.
1016          */
1017         if (new_smi->intf_num < num_force_kipmid)
1018                 enable = force_kipmid[new_smi->intf_num];
1019         /*
1020          * The BT interface is efficient enough to not need a thread,
1021          * and there is no need for a thread if we have interrupts.
1022          */
1023         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1024                 enable = 1;
1025
1026         if (enable) {
1027                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1028                                               "kipmi%d", new_smi->intf_num);
1029                 if (IS_ERR(new_smi->thread)) {
1030                         printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1031                                " kernel thread due to error %ld, only using"
1032                                " timers to drive the interface\n",
1033                                PTR_ERR(new_smi->thread));
1034                         new_smi->thread = NULL;
1035                 }
1036         }
1037
1038         return 0;
1039 }
1040
1041 static void set_maintenance_mode(void *send_info, int enable)
1042 {
1043         struct smi_info   *smi_info = send_info;
1044
1045         if (!enable)
1046                 atomic_set(&smi_info->req_events, 0);
1047 }
1048
1049 static struct ipmi_smi_handlers handlers =
1050 {
1051         .owner                  = THIS_MODULE,
1052         .start_processing       = smi_start_processing,
1053         .sender                 = sender,
1054         .request_events         = request_events,
1055         .set_maintenance_mode   = set_maintenance_mode,
1056         .set_run_to_completion  = set_run_to_completion,
1057         .poll                   = poll,
1058 };
1059
1060 /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1061    a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
1062
1063 static LIST_HEAD(smi_infos);
1064 static DEFINE_MUTEX(smi_infos_lock);
1065 static int smi_num; /* Used to sequence the SMIs */
1066
1067 #define DEFAULT_REGSPACING      1
1068 #define DEFAULT_REGSIZE         1
1069
1070 static int           si_trydefaults = 1;
1071 static char          *si_type[SI_MAX_PARMS];
1072 #define MAX_SI_TYPE_STR 30
1073 static char          si_type_str[MAX_SI_TYPE_STR];
1074 static unsigned long addrs[SI_MAX_PARMS];
1075 static unsigned int num_addrs;
1076 static unsigned int  ports[SI_MAX_PARMS];
1077 static unsigned int num_ports;
1078 static int           irqs[SI_MAX_PARMS];
1079 static unsigned int num_irqs;
1080 static int           regspacings[SI_MAX_PARMS];
1081 static unsigned int num_regspacings;
1082 static int           regsizes[SI_MAX_PARMS];
1083 static unsigned int num_regsizes;
1084 static int           regshifts[SI_MAX_PARMS];
1085 static unsigned int num_regshifts;
1086 static int slave_addrs[SI_MAX_PARMS];
1087 static unsigned int num_slave_addrs;
1088
1089 #define IPMI_IO_ADDR_SPACE  0
1090 #define IPMI_MEM_ADDR_SPACE 1
1091 static char *addr_space_to_str[] = { "i/o", "mem" };
1092
1093 static int hotmod_handler(const char *val, struct kernel_param *kp);
1094
1095 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1096 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1097                  " Documentation/IPMI.txt in the kernel sources for the"
1098                  " gory details.");
1099
1100 module_param_named(trydefaults, si_trydefaults, bool, 0);
1101 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1102                  " default scan of the KCS and SMIC interface at the standard"
1103                  " address");
1104 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1105 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1106                  " interface separated by commas.  The types are 'kcs',"
1107                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1108                  " the first interface to kcs and the second to bt");
1109 module_param_array(addrs, ulong, &num_addrs, 0);
1110 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1111                  " addresses separated by commas.  Only use if an interface"
1112                  " is in memory.  Otherwise, set it to zero or leave"
1113                  " it blank.");
1114 module_param_array(ports, uint, &num_ports, 0);
1115 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1116                  " addresses separated by commas.  Only use if an interface"
1117                  " is a port.  Otherwise, set it to zero or leave"
1118                  " it blank.");
1119 module_param_array(irqs, int, &num_irqs, 0);
1120 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1121                  " addresses separated by commas.  Only use if an interface"
1122                  " has an interrupt.  Otherwise, set it to zero or leave"
1123                  " it blank.");
1124 module_param_array(regspacings, int, &num_regspacings, 0);
1125 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1126                  " and each successive register used by the interface.  For"
1127                  " instance, if the start address is 0xca2 and the spacing"
1128                  " is 2, then the second address is at 0xca4.  Defaults"
1129                  " to 1.");
1130 module_param_array(regsizes, int, &num_regsizes, 0);
1131 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1132                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1133                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1134                  " the 8-bit IPMI register has to be read from a larger"
1135                  " register.");
1136 module_param_array(regshifts, int, &num_regshifts, 0);
1137 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1138                  " IPMI register, in bits.  For instance, if the data"
1139                  " is read from a 32-bit word and the IPMI data is in"
1140                  " bits 8-15, then the shift would be 8.");
1141 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1142 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1143                  " the controller.  Normally this is 0x20, but can be"
1144                  " overridden by this parm.  This is an array indexed"
1145                  " by interface number.");
1146 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1147 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1148                  " disabled(0).  Normally the IPMI driver auto-detects"
1149                  " this, but the value may be overridden by this parm.");
1150 module_param(unload_when_empty, int, 0);
1151 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1152                  " specified or found, default is 1.  Setting to 0"
1153                  " is useful for hot add of devices using hotmod.");
1154
1155
1156 static void std_irq_cleanup(struct smi_info *info)
1157 {
1158         if (info->si_type == SI_BT)
1159                 /* Disable the interrupt in the BT interface. */
1160                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1161         free_irq(info->irq, info);
1162 }
1163
1164 static int std_irq_setup(struct smi_info *info)
1165 {
1166         int rv;
1167
1168         if (!info->irq)
1169                 return 0;
1170
1171         if (info->si_type == SI_BT) {
1172                 rv = request_irq(info->irq,
1173                                  si_bt_irq_handler,
1174                                  IRQF_SHARED | IRQF_DISABLED,
1175                                  DEVICE_NAME,
1176                                  info);
1177                 if (!rv)
1178                         /* Enable the interrupt in the BT interface. */
1179                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1180                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1181         } else
1182                 rv = request_irq(info->irq,
1183                                  si_irq_handler,
1184                                  IRQF_SHARED | IRQF_DISABLED,
1185                                  DEVICE_NAME,
1186                                  info);
1187         if (rv) {
1188                 printk(KERN_WARNING
1189                        "ipmi_si: %s unable to claim interrupt %d,"
1190                        " running polled\n",
1191                        DEVICE_NAME, info->irq);
1192                 info->irq = 0;
1193         } else {
1194                 info->irq_cleanup = std_irq_cleanup;
1195                 printk("  Using irq %d\n", info->irq);
1196         }
1197
1198         return rv;
1199 }
1200
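/*
 * I/O-port accessors.  Each register offset is scaled by regspacing to
 * form the port address; for regsize 2 or 4 the 8-bit IPMI register is
 * pulled out of the wider word using regshift, e.g. regsize=2 with
 * regshift=8 takes the IPMI data from bits 8-15 of the 16-bit port.
 */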
1201 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1202 {
1203         unsigned int addr = io->addr_data;
1204
1205         return inb(addr + (offset * io->regspacing));
1206 }
1207
1208 static void port_outb(struct si_sm_io *io, unsigned int offset,
1209                       unsigned char b)
1210 {
1211         unsigned int addr = io->addr_data;
1212
1213         outb(b, addr + (offset * io->regspacing));
1214 }
1215
1216 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1217 {
1218         unsigned int addr = io->addr_data;
1219
1220         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1221 }
1222
1223 static void port_outw(struct si_sm_io *io, unsigned int offset,
1224                       unsigned char b)
1225 {
1226         unsigned int addr = io->addr_data;
1227
1228         outw(b << io->regshift, addr + (offset * io->regspacing));
1229 }
1230
1231 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1232 {
1233         unsigned int addr = io->addr_data;
1234
1235         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1236 }
1237
1238 static void port_outl(struct si_sm_io *io, unsigned int offset,
1239                       unsigned char b)
1240 {
1241         unsigned int addr = io->addr_data;
1242
1243         outl(b << io->regshift, addr+(offset * io->regspacing));
1244 }
1245
1246 static void port_cleanup(struct smi_info *info)
1247 {
1248         unsigned int addr = info->io.addr_data;
1249         int          idx;
1250
1251         if (addr) {
1252                 for (idx = 0; idx < info->io_size; idx++) {
1253                         release_region(addr + idx * info->io.regspacing,
1254                                        info->io.regsize);
1255                 }
1256         }
1257 }
1258
1259 static int port_setup(struct smi_info *info)
1260 {
1261         unsigned int addr = info->io.addr_data;
1262         int          idx;
1263
1264         if (!addr)
1265                 return -ENODEV;
1266
1267         info->io_cleanup = port_cleanup;
1268
1269         /* Figure out the actual inb/inw/inl/etc routine to use based
1270            upon the register size. */
1271         switch (info->io.regsize) {
1272         case 1:
1273                 info->io.inputb = port_inb;
1274                 info->io.outputb = port_outb;
1275                 break;
1276         case 2:
1277                 info->io.inputb = port_inw;
1278                 info->io.outputb = port_outw;
1279                 break;
1280         case 4:
1281                 info->io.inputb = port_inl;
1282                 info->io.outputb = port_outl;
1283                 break;
1284         default:
1285                 printk("ipmi_si: Invalid register size: %d\n",
1286                        info->io.regsize);
1287                 return -EINVAL;
1288         }
1289
1290         /* Some BIOSes reserve disjoint I/O regions in their ACPI
1291          * tables.  This causes problems when trying to register the
1292          * entire I/O region.  Therefore we must register each I/O
1293          * port separately.
1294          */
1295         for (idx = 0; idx < info->io_size; idx++) {
1296                 if (request_region(addr + idx * info->io.regspacing,
1297                                    info->io.regsize, DEVICE_NAME) == NULL) {
1298                         /* Undo allocations */
1299                         while (idx--) {
1300                                 release_region(addr + idx * info->io.regspacing,
1301                                                info->io.regsize);
1302                         }
1303                         return -EIO;
1304                 }
1305         }
1306         return 0;
1307 }
1308
1309 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1310 {
1311         return readb((io->addr)+(offset * io->regspacing));
1312 }
1313
1314 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1315                      unsigned char b)
1316 {
1317         writeb(b, (io->addr)+(offset * io->regspacing));
1318 }
1319
1320 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1321 {
1322         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1323                 & 0xff;
1324 }
1325
1326 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1327                      unsigned char b)
1328 {
1329         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1330 }
1331
1332 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1333 {
1334         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1335                 & 0xff;
1336 }
1337
1338 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1339                      unsigned char b)
1340 {
1341         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1342 }
1343
1344 #ifdef readq
1345 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1346 {
1347         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1348                 & 0xff;
1349 }
1350
1351 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1352                      unsigned char b)
1353 {
1354         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1355 }
1356 #endif
1357
1358 static void mem_cleanup(struct smi_info *info)
1359 {
1360         unsigned long addr = info->io.addr_data;
1361         int           mapsize;
1362
1363         if (info->io.addr) {
1364                 iounmap(info->io.addr);
1365
1366                 mapsize = ((info->io_size * info->io.regspacing)
1367                            - (info->io.regspacing - info->io.regsize));
1368
1369                 release_mem_region(addr, mapsize);
1370         }
1371 }
1372
1373 static int mem_setup(struct smi_info *info)
1374 {
1375         unsigned long addr = info->io.addr_data;
1376         int           mapsize;
1377
1378         if (!addr)
1379                 return -ENODEV;
1380
1381         info->io_cleanup = mem_cleanup;
1382
1383         /* Figure out the actual readb/readw/readl/etc routine to use based
1384            upon the register size. */
1385         switch (info->io.regsize) {
1386         case 1:
1387                 info->io.inputb = intf_mem_inb;
1388                 info->io.outputb = intf_mem_outb;
1389                 break;
1390         case 2:
1391                 info->io.inputb = intf_mem_inw;
1392                 info->io.outputb = intf_mem_outw;
1393                 break;
1394         case 4:
1395                 info->io.inputb = intf_mem_inl;
1396                 info->io.outputb = intf_mem_outl;
1397                 break;
1398 #ifdef readq
1399         case 8:
1400                 info->io.inputb = mem_inq;
1401                 info->io.outputb = mem_outq;
1402                 break;
1403 #endif
1404         default:
1405                 printk("ipmi_si: Invalid register size: %d\n",
1406                        info->io.regsize);
1407                 return -EINVAL;
1408         }
1409
1410         /* Calculate the total amount of memory to claim.  This is an
1411          * unusual looking calculation, but it avoids claiming any
1412          * more memory than it has to.  It will claim everything
1413          * from the first address to the end of the last full
1414          * register. */
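        /*
         * For illustration (hypothetical numbers): with io_size = 3,
         * regspacing = 4 and regsize = 1, the registers start at byte
         * offsets 0, 4 and 8, so mapsize = 3*4 - (4-1) = 9 bytes, just
         * enough to reach the end of the last register.
         */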
1415         mapsize = ((info->io_size * info->io.regspacing)
1416                    - (info->io.regspacing - info->io.regsize));
1417
1418         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1419                 return -EIO;
1420
1421         info->io.addr = ioremap(addr, mapsize);
1422         if (info->io.addr == NULL) {
1423                 release_mem_region(addr, mapsize);
1424                 return -EIO;
1425         }
1426         return 0;
1427 }
1428
1429 /*
1430  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1431  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1432  * Options are:
1433  *   rsp=<regspacing>
1434  *   rsi=<regsize>
1435  *   rsh=<regshift>
1436  *   irq=<irq>
1437  *   ipmb=<ipmb addr>
1438  */
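/*
 * Illustrative hotmod usage (assuming the parameter is exposed through sysfs
 * as /sys/module/ipmi_si/parameters/hotmod):
 *
 *   echo "add,kcs,i/o,0xca2,rsp=1,irq=0" > /sys/module/ipmi_si/parameters/hotmod
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */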
1439 enum hotmod_op { HM_ADD, HM_REMOVE };
1440 struct hotmod_vals {
1441         char *name;
1442         int  val;
1443 };
1444 static struct hotmod_vals hotmod_ops[] = {
1445         { "add",        HM_ADD },
1446         { "remove",     HM_REMOVE },
1447         { NULL }
1448 };
1449 static struct hotmod_vals hotmod_si[] = {
1450         { "kcs",        SI_KCS },
1451         { "smic",       SI_SMIC },
1452         { "bt",         SI_BT },
1453         { NULL }
1454 };
1455 static struct hotmod_vals hotmod_as[] = {
1456         { "mem",        IPMI_MEM_ADDR_SPACE },
1457         { "i/o",        IPMI_IO_ADDR_SPACE },
1458         { NULL }
1459 };
1460
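/*
 * Parse the next comma-terminated token in *curr against the table v, store
 * the matching value in *val and advance *curr past the comma.  Returns 0 on
 * success or -EINVAL if the token is missing or not recognized.
 */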
1461 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1462 {
1463         char *s;
1464         int  i;
1465
1466         s = strchr(*curr, ',');
1467         if (!s) {
1468                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1469                 return -EINVAL;
1470         }
1471         *s = '\0';
1472         s++;
1473         for (i = 0; v[i].name; i++) {
1474                 if (strcmp(*curr, v[i].name) == 0) {
1475                         *val = v[i].val;
1476                         *curr = s;
1477                         return 0;
1478                 }
1479         }
1480
1481         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1482         return -EINVAL;
1483 }
1484
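/*
 * If curr matches name, parse option as an integer into *val and return 1.
 * Returns 0 if the names do not match, or a negative errno if the option is
 * missing or malformed.
 */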
1485 static int check_hotmod_int_op(const char *curr, const char *option,
1486                                const char *name, int *val)
1487 {
1488         char *n;
1489
1490         if (strcmp(curr, name) == 0) {
1491                 if (!option) {
1492                         printk(KERN_WARNING PFX
1493                                "No option given for '%s'\n",
1494                                curr);
1495                         return -EINVAL;
1496                 }
1497                 *val = simple_strtoul(option, &n, 0);
1498                 if ((*n != '\0') || (*option == '\0')) {
1499                         printk(KERN_WARNING PFX
1500                                "Bad option given for '%s'\n",
1501                                curr);
1502                         return -EINVAL;
1503                 }
1504                 return 1;
1505         }
1506         return 0;
1507 }
1508
1509 static int hotmod_handler(const char *val, struct kernel_param *kp)
1510 {
1511         char *str = kstrdup(val, GFP_KERNEL);
1512         int  rv;
1513         char *next, *curr, *s, *n, *o;
1514         enum hotmod_op op;
1515         enum si_type si_type;
1516         int  addr_space;
1517         unsigned long addr;
1518         int regspacing;
1519         int regsize;
1520         int regshift;
1521         int irq;
1522         int ipmb;
1523         int ival;
1524         int len;
1525         struct smi_info *info;
1526
1527         if (!str)
1528                 return -ENOMEM;
1529
1530         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1531         len = strlen(str);
1532         ival = len - 1;
1533         while ((ival >= 0) && isspace(str[ival])) {
1534                 str[ival] = '\0';
1535                 ival--;
1536         }
1537
1538         for (curr = str; curr; curr = next) {
1539                 regspacing = 1;
1540                 regsize = 1;
1541                 regshift = 0;
1542                 irq = 0;
1543                 ipmb = 0x20;
1544
1545                 next = strchr(curr, ':');
1546                 if (next) {
1547                         *next = '\0';
1548                         next++;
1549                 }
1550
1551                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1552                 if (rv)
1553                         break;
1554                 op = ival;
1555
1556                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1557                 if (rv)
1558                         break;
1559                 si_type = ival;
1560
1561                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1562                 if (rv)
1563                         break;
1564
1565                 s = strchr(curr, ',');
1566                 if (s) {
1567                         *s = '\0';
1568                         s++;
1569                 }
1570                 addr = simple_strtoul(curr, &n, 0);
1571                 if ((*n != '\0') || (*curr == '\0')) {
1572                         printk(KERN_WARNING PFX "Invalid hotmod address"
1573                                " '%s'\n", curr);
1574                         break;
1575                 }
1576
1577                 while (s) {
1578                         curr = s;
1579                         s = strchr(curr, ',');
1580                         if (s) {
1581                                 *s = '\0';
1582                                 s++;
1583                         }
1584                         o = strchr(curr, '=');
1585                         if (o) {
1586                                 *o = '\0';
1587                                 o++;
1588                         }
1589                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1590                         if (rv < 0)
1591                                 goto out;
1592                         else if (rv)
1593                                 continue;
1594                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1595                         if (rv < 0)
1596                                 goto out;
1597                         else if (rv)
1598                                 continue;
1599                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1600                         if (rv < 0)
1601                                 goto out;
1602                         else if (rv)
1603                                 continue;
1604                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1605                         if (rv < 0)
1606                                 goto out;
1607                         else if (rv)
1608                                 continue;
1609                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1610                         if (rv < 0)
1611                                 goto out;
1612                         else if (rv)
1613                                 continue;
1614
1615                         rv = -EINVAL;
1616                         printk(KERN_WARNING PFX
1617                                "Invalid hotmod option '%s'\n",
1618                                curr);
1619                         goto out;
1620                 }
1621
1622                 if (op == HM_ADD) {
1623                         info = kzalloc(sizeof(*info), GFP_KERNEL);
1624                         if (!info) {
1625                                 rv = -ENOMEM;
1626                                 goto out;
1627                         }
1628
1629                         info->addr_source = "hotmod";
1630                         info->si_type = si_type;
1631                         info->io.addr_data = addr;
1632                         info->io.addr_type = addr_space;
1633                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1634                                 info->io_setup = mem_setup;
1635                         else
1636                                 info->io_setup = port_setup;
1637
1638                         info->io.addr = NULL;
1639                         info->io.regspacing = regspacing;
1640                         if (!info->io.regspacing)
1641                                 info->io.regspacing = DEFAULT_REGSPACING;
1642                         info->io.regsize = regsize;
1643                         if (!info->io.regsize)
1644                                 info->io.regsize = DEFAULT_REGSPACING;
1645                         info->io.regshift = regshift;
1646                         info->irq = irq;
1647                         if (info->irq)
1648                                 info->irq_setup = std_irq_setup;
1649                         info->slave_addr = ipmb;
1650
1651                         try_smi_init(info);
1652                 } else {
1653                         /* remove */
1654                         struct smi_info *e, *tmp_e;
1655
1656                         mutex_lock(&smi_infos_lock);
1657                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1658                                 if (e->io.addr_type != addr_space)
1659                                         continue;
1660                                 if (e->si_type != si_type)
1661                                         continue;
1662                                 if (e->io.addr_data == addr)
1663                                         cleanup_one_si(e);
1664                         }
1665                         mutex_unlock(&smi_infos_lock);
1666                 }
1667         }
1668         rv = len;
1669  out:
1670         kfree(str);
1671         return rv;
1672 }
1673
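/*
 * Register any interfaces given up front through the ports, addrs, si_type,
 * regspacings, regsizes, regshifts and irqs arrays (filled in from module
 * parameters), hence the "hardcoded" address source.
 */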
1674 static __devinit void hardcode_find_bmc(void)
1675 {
1676         int             i;
1677         struct smi_info *info;
1678
1679         for (i = 0; i < SI_MAX_PARMS; i++) {
1680                 if (!ports[i] && !addrs[i])
1681                         continue;
1682
1683                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1684                 if (!info)
1685                         return;
1686
1687                 info->addr_source = "hardcoded";
1688
1689                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1690                         info->si_type = SI_KCS;
1691                 } else if (strcmp(si_type[i], "smic") == 0) {
1692                         info->si_type = SI_SMIC;
1693                 } else if (strcmp(si_type[i], "bt") == 0) {
1694                         info->si_type = SI_BT;
1695                 } else {
1696                         printk(KERN_WARNING
1697                                "ipmi_si: Interface type specified "
1698                                "for interface %d was invalid: %s\n",
1699                                i, si_type[i]);
1700                         kfree(info);
1701                         continue;
1702                 }
1703
1704                 if (ports[i]) {
1705                         /* An I/O port */
1706                         info->io_setup = port_setup;
1707                         info->io.addr_data = ports[i];
1708                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1709                 } else if (addrs[i]) {
1710                         /* A memory port */
1711                         info->io_setup = mem_setup;
1712                         info->io.addr_data = addrs[i];
1713                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1714                 } else {
1715                         printk(KERN_WARNING
1716                                "ipmi_si: Interface type specified "
1717                                "for interface %d, "
1718                                "but port and address were not set or "
1719                                "set to zero.\n", i);
1720                         kfree(info);
1721                         continue;
1722                 }
1723
1724                 info->io.addr = NULL;
1725                 info->io.regspacing = regspacings[i];
1726                 if (!info->io.regspacing)
1727                         info->io.regspacing = DEFAULT_REGSPACING;
1728                 info->io.regsize = regsizes[i];
1729                 if (!info->io.regsize)
1730                         info->io.regsize = DEFAULT_REGSPACING;
1731                 info->io.regshift = regshifts[i];
1732                 info->irq = irqs[i];
1733                 if (info->irq)
1734                         info->irq_setup = std_irq_setup;
1735
1736                 try_smi_init(info);
1737         }
1738 }
1739
1740 #ifdef CONFIG_ACPI
1741
1742 #include <linux/acpi.h>
1743
1744 /* Once we get an ACPI failure, we don't try any more, because we go
1745    through the tables sequentially.  Once we don't find a table, there
1746    are no more. */
1747 static int acpi_failure;
1748
1749 /* For GPE-type interrupts. */
1750 static u32 ipmi_acpi_gpe(void *context)
1751 {
1752         struct smi_info *smi_info = context;
1753         unsigned long   flags;
1754 #ifdef DEBUG_TIMING
1755         struct timeval t;
1756 #endif
1757
1758         spin_lock_irqsave(&(smi_info->si_lock), flags);
1759
1760         smi_inc_stat(smi_info, interrupts);
1761
1762 #ifdef DEBUG_TIMING
1763         do_gettimeofday(&t);
1764         printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1765 #endif
1766         smi_event_handler(smi_info, 0);
1767         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1768
1769         return ACPI_INTERRUPT_HANDLED;
1770 }
1771
1772 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1773 {
1774         if (!info->irq)
1775                 return;
1776
1777         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1778 }
1779
1780 static int acpi_gpe_irq_setup(struct smi_info *info)
1781 {
1782         acpi_status status;
1783
1784         if (!info->irq)
1785                 return 0;
1786
1787         /* FIXME - is level triggered right? */
1788         status = acpi_install_gpe_handler(NULL,
1789                                           info->irq,
1790                                           ACPI_GPE_LEVEL_TRIGGERED,
1791                                           &ipmi_acpi_gpe,
1792                                           info);
1793         if (status != AE_OK) {
1794                 printk(KERN_WARNING
1795                        "ipmi_si: %s unable to claim ACPI GPE %d,"
1796                        " running polled\n",
1797                        DEVICE_NAME, info->irq);
1798                 info->irq = 0;
1799                 return -EINVAL;
1800         } else {
1801                 info->irq_cleanup = acpi_gpe_irq_cleanup;
1802                 printk("  Using ACPI GPE %d\n", info->irq);
1803                 return 0;
1804         }
1805 }
1806
1807 /*
1808  * Defined at
1809  * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1810  */
1811 struct SPMITable {
1812         s8      Signature[4];
1813         u32     Length;
1814         u8      Revision;
1815         u8      Checksum;
1816         s8      OEMID[6];
1817         s8      OEMTableID[8];
1818         s8      OEMRevision[4];
1819         s8      CreatorID[4];
1820         s8      CreatorRevision[4];
1821         u8      InterfaceType;
1822         u8      IPMIlegacy;
1823         s16     SpecificationRevision;
1824
1825         /*
1826          * Bit 0 - SCI interrupt supported
1827          * Bit 1 - I/O APIC/SAPIC
1828          */
1829         u8      InterruptType;
1830
1831         /* If bit 0 of InterruptType is set, then this is the SCI
1832            interrupt in the GPEx_STS register. */
1833         u8      GPE;
1834
1835         s16     Reserved;
1836
1837         /* If bit 1 of InterruptType is set, then this is the I/O
1838            APIC/SAPIC interrupt. */
1839         u32     GlobalSystemInterrupt;
1840
1841         /* The actual register address. */
1842         struct acpi_generic_address addr;
1843
1844         u8      UID[4];
1845
1846         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
1847 };
1848
1849 static __devinit int try_init_acpi(struct SPMITable *spmi)
1850 {
1851         struct smi_info  *info;
1852         u8               addr_space;
1853
1854         if (spmi->IPMIlegacy != 1) {
1855                 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1856                 return -ENODEV;
1857         }
1858
1859         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1860                 addr_space = IPMI_MEM_ADDR_SPACE;
1861         else
1862                 addr_space = IPMI_IO_ADDR_SPACE;
1863
1864         info = kzalloc(sizeof(*info), GFP_KERNEL);
1865         if (!info) {
1866                 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1867                 return -ENOMEM;
1868         }
1869
1870         info->addr_source = "ACPI";
1871
1872         /* Figure out the interface type. */
1873         switch (spmi->InterfaceType)
1874         {
1875         case 1: /* KCS */
1876                 info->si_type = SI_KCS;
1877                 break;
1878         case 2: /* SMIC */
1879                 info->si_type = SI_SMIC;
1880                 break;
1881         case 3: /* BT */
1882                 info->si_type = SI_BT;
1883                 break;
1884         default:
1885                 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1886                         spmi->InterfaceType);
1887                 kfree(info);
1888                 return -EIO;
1889         }
1890
1891         if (spmi->InterruptType & 1) {
1892                 /* We've got a GPE interrupt. */
1893                 info->irq = spmi->GPE;
1894                 info->irq_setup = acpi_gpe_irq_setup;
1895         } else if (spmi->InterruptType & 2) {
1896                 /* We've got an APIC/SAPIC interrupt. */
1897                 info->irq = spmi->GlobalSystemInterrupt;
1898                 info->irq_setup = std_irq_setup;
1899         } else {
1900                 /* Use the default interrupt setting. */
1901                 info->irq = 0;
1902                 info->irq_setup = NULL;
1903         }
1904
1905         if (spmi->addr.bit_width) {
1906                 /* A (hopefully) properly formed register bit width. */
1907                 info->io.regspacing = spmi->addr.bit_width / 8;
1908         } else {
1909                 info->io.regspacing = DEFAULT_REGSPACING;
1910         }
1911         info->io.regsize = info->io.regspacing;
1912         info->io.regshift = spmi->addr.bit_offset;
1913
1914         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1915                 info->io_setup = mem_setup;
1916                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1917         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1918                 info->io_setup = port_setup;
1919                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1920         } else {
1921                 kfree(info);
1922                 printk(KERN_INFO "ipmi_si: Unknown ACPI I/O Address type\n");
1923                 return -EIO;
1924         }
1925         info->io.addr_data = spmi->addr.address;
1926
1927         try_smi_init(info);
1928
1929         return 0;
1930 }
1931
1932 static __devinit void acpi_find_bmc(void)
1933 {
1934         acpi_status      status;
1935         struct SPMITable *spmi;
1936         int              i;
1937
1938         if (acpi_disabled)
1939                 return;
1940
1941         if (acpi_failure)
1942                 return;
1943
1944         for (i = 0; ; i++) {
1945                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1946                                         (struct acpi_table_header **)&spmi);
1947                 if (status != AE_OK)
1948                         return;
1949
1950                 try_init_acpi(spmi);
1951         }
1952 }
1953 #endif
1954
1955 #ifdef CONFIG_DMI
1956 struct dmi_ipmi_data
1957 {
1958         u8              type;
1959         u8              addr_space;
1960         unsigned long   base_addr;
1961         u8              irq;
1962         u8              offset;
1963         u8              slave_addr;
1964 };
1965
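/*
 * Pull the IPMI information out of a DMI/SMBIOS "IPMI Device Information"
 * record.  The offsets used below assume the usual layout of that record:
 * data[4] is the interface type, data[6] the BMC slave address, the base
 * address starts at byte 8, data[0x10] is the base address modifier (I/O
 * vs. memory, register spacing, address LSB) and data[0x11] the interrupt
 * number.
 */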
1966 static int __devinit decode_dmi(const struct dmi_header *dm,
1967                                 struct dmi_ipmi_data *dmi)
1968 {
1969         const u8        *data = (const u8 *)dm;
1970         unsigned long   base_addr;
1971         u8              reg_spacing;
1972         u8              len = dm->length;
1973
1974         dmi->type = data[4];
1975
1976         memcpy(&base_addr, data+8, sizeof(unsigned long));
1977         if (len >= 0x11) {
1978                 if (base_addr & 1) {
1979                         /* I/O */
1980                         base_addr &= 0xFFFE;
1981                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
1982                 }
1983                 else {
1984                         /* Memory */
1985                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1986                 }
1987                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1988                    is odd. */
1989                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1990
1991                 dmi->irq = data[0x11];
1992
1993                 /* The top two bits of byte 0x10 hold the register spacing. */
1994                 reg_spacing = (data[0x10] & 0xC0) >> 6;
1995                 switch (reg_spacing) {
1996                 case 0x00: /* Byte boundaries */
1997                         dmi->offset = 1;
1998                         break;
1999                 case 0x01: /* 32-bit boundaries */
2000                         dmi->offset = 4;
2001                         break;
2002                 case 0x02: /* 16-byte boundaries */
2003                         dmi->offset = 16;
2004                         break;
2005                 default:
2006                         /* Some other interface, just ignore it. */
2007                         return -EIO;
2008                 }
2009         } else {
2010                 /* Old DMI spec. */
2011                 /* Note that technically, the lower bit of the base
2012                  * address should be 1 if the address is I/O and 0 if
2013                  * the address is in memory.  However, many systems get that
2014                  * wrong (and all that I have seen are I/O), so we just
2015                  * ignore that bit and assume I/O.  Systems that use
2016                  * memory should use the newer spec, anyway. */
2017                 dmi->base_addr = base_addr & 0xfffe;
2018                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2019                 dmi->offset = 1;
2020         }
2021
2022         dmi->slave_addr = data[6];
2023
2024         return 0;
2025 }
2026
2027 static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2028 {
2029         struct smi_info *info;
2030
2031         info = kzalloc(sizeof(*info), GFP_KERNEL);
2032         if (!info) {
2033                 printk(KERN_ERR
2034                        "ipmi_si: Could not allocate SI data\n");
2035                 return;
2036         }
2037
2038         info->addr_source = "SMBIOS";
2039
2040         switch (ipmi_data->type) {
2041         case 0x01: /* KCS */
2042                 info->si_type = SI_KCS;
2043                 break;
2044         case 0x02: /* SMIC */
2045                 info->si_type = SI_SMIC;
2046                 break;
2047         case 0x03: /* BT */
2048                 info->si_type = SI_BT;
2049                 break;
2050         default:
2051                 kfree(info);
2052                 return;
2053         }
2054
2055         switch (ipmi_data->addr_space) {
2056         case IPMI_MEM_ADDR_SPACE:
2057                 info->io_setup = mem_setup;
2058                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2059                 break;
2060
2061         case IPMI_IO_ADDR_SPACE:
2062                 info->io_setup = port_setup;
2063                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2064                 break;
2065
2066         default:
2067                 kfree(info);
2068                 printk(KERN_WARNING
2069                        "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2070                        ipmi_data->addr_space);
2071                 return;
2072         }
2073         info->io.addr_data = ipmi_data->base_addr;
2074
2075         info->io.regspacing = ipmi_data->offset;
2076         if (!info->io.regspacing)
2077                 info->io.regspacing = DEFAULT_REGSPACING;
2078         info->io.regsize = DEFAULT_REGSPACING;
2079         info->io.regshift = 0;
2080
2081         info->slave_addr = ipmi_data->slave_addr;
2082
2083         info->irq = ipmi_data->irq;
2084         if (info->irq)
2085                 info->irq_setup = std_irq_setup;
2086
2087         try_smi_init(info);
2088 }
2089
2090 static void __devinit dmi_find_bmc(void)
2091 {
2092         const struct dmi_device *dev = NULL;
2093         struct dmi_ipmi_data data;
2094         int                  rv;
2095
2096         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2097                 memset(&data, 0, sizeof(data));
2098                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2099                                 &data);
2100                 if (!rv)
2101                         try_init_dmi(&data);
2102         }
2103 }
2104 #endif /* CONFIG_DMI */
2105
2106 #ifdef CONFIG_PCI
2107
2108 #define PCI_ERMC_CLASSCODE              0x0C0700
2109 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2110 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2111 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2112 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2113 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2114
2115 #define PCI_HP_VENDOR_ID    0x103C
2116 #define PCI_MMC_DEVICE_ID   0x121A
2117 #define PCI_MMC_ADDR_CW     0x10
2118
2119 static void ipmi_pci_cleanup(struct smi_info *info)
2120 {
2121         struct pci_dev *pdev = info->addr_source_data;
2122
2123         pci_disable_device(pdev);
2124 }
2125
2126 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2127                                     const struct pci_device_id *ent)
2128 {
2129         int rv;
2130         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2131         struct smi_info *info;
2132         int first_reg_offset = 0;
2133
2134         info = kzalloc(sizeof(*info), GFP_KERNEL);
2135         if (!info)
2136                 return -ENOMEM;
2137
2138         info->addr_source = "PCI";
2139
2140         switch (class_type) {
2141         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2142                 info->si_type = SI_SMIC;
2143                 break;
2144
2145         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2146                 info->si_type = SI_KCS;
2147                 break;
2148
2149         case PCI_ERMC_CLASSCODE_TYPE_BT:
2150                 info->si_type = SI_BT;
2151                 break;
2152
2153         default:
2154                 kfree(info);
2155                 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2156                        pci_name(pdev), class_type);
2157                 return -ENOMEM;
2158         }
2159
2160         rv = pci_enable_device(pdev);
2161         if (rv) {
2162                 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2163                        pci_name(pdev));
2164                 kfree(info);
2165                 return rv;
2166         }
2167
2168         info->addr_source_cleanup = ipmi_pci_cleanup;
2169         info->addr_source_data = pdev;
2170
2171         if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2172                 first_reg_offset = 1;
2173
2174         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2175                 info->io_setup = port_setup;
2176                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2177         } else {
2178                 info->io_setup = mem_setup;
2179                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2180         }
2181         info->io.addr_data = pci_resource_start(pdev, 0);
2182
2183         info->io.regspacing = DEFAULT_REGSPACING;
2184         info->io.regsize = DEFAULT_REGSPACING;
2185         info->io.regshift = 0;
2186
2187         info->irq = pdev->irq;
2188         if (info->irq)
2189                 info->irq_setup = std_irq_setup;
2190
2191         info->dev = &pdev->dev;
2192         pci_set_drvdata(pdev, info);
2193
2194         return try_smi_init(info);
2195 }
2196
2197 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2198 {
2199         struct smi_info *info = pci_get_drvdata(pdev);
2200         cleanup_one_si(info);
2201 }
2202
2203 #ifdef CONFIG_PM
2204 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2205 {
2206         return 0;
2207 }
2208
2209 static int ipmi_pci_resume(struct pci_dev *pdev)
2210 {
2211         return 0;
2212 }
2213 #endif
2214
2215 static struct pci_device_id ipmi_pci_devices[] = {
2216         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2217         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2218         { 0, }
2219 };
2220 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2221
2222 static struct pci_driver ipmi_pci_driver = {
2223         .name =         DEVICE_NAME,
2224         .id_table =     ipmi_pci_devices,
2225         .probe =        ipmi_pci_probe,
2226         .remove =       __devexit_p(ipmi_pci_remove),
2227 #ifdef CONFIG_PM
2228         .suspend =      ipmi_pci_suspend,
2229         .resume =       ipmi_pci_resume,
2230 #endif
2231 };
2232 #endif /* CONFIG_PCI */
2233
2234
2235 #ifdef CONFIG_PPC_OF
2236 static int __devinit ipmi_of_probe(struct of_device *dev,
2237                          const struct of_device_id *match)
2238 {
2239         struct smi_info *info;
2240         struct resource resource;
2241         const int *regsize, *regspacing, *regshift;
2242         struct device_node *np = dev->node;
2243         int ret;
2244         int proplen;
2245
2246         dev_info(&dev->dev, PFX "probing via device tree\n");
2247
2248         ret = of_address_to_resource(np, 0, &resource);
2249         if (ret) {
2250                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2251                 return ret;
2252         }
2253
2254         regsize = of_get_property(np, "reg-size", &proplen);
2255         if (regsize && proplen != 4) {
2256                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2257                 return -EINVAL;
2258         }
2259
2260         regspacing = of_get_property(np, "reg-spacing", &proplen);
2261         if (regspacing && proplen != 4) {
2262                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2263                 return -EINVAL;
2264         }
2265
2266         regshift = of_get_property(np, "reg-shift", &proplen);
2267         if (regshift && proplen != 4) {
2268                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2269                 return -EINVAL;
2270         }
2271
2272         info = kzalloc(sizeof(*info), GFP_KERNEL);
2273
2274         if (!info) {
2275                 dev_err(&dev->dev,
2276                         PFX "could not allocate memory for OF probe\n");
2277                 return -ENOMEM;
2278         }
2279
2280         info->si_type           = (enum si_type) match->data;
2281         info->addr_source       = "device-tree";
2282         info->io_setup          = mem_setup;
2283         info->irq_setup         = std_irq_setup;
2284
2285         info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2286         info->io.addr_data      = resource.start;
2287
2288         info->io.regsize        = regsize ? *regsize : DEFAULT_REGSIZE;
2289         info->io.regspacing     = regspacing ? *regspacing : DEFAULT_REGSPACING;
2290         info->io.regshift       = regshift ? *regshift : 0;
2291
2292         info->irq               = irq_of_parse_and_map(dev->node, 0);
2293         info->dev               = &dev->dev;
2294
2295         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
2296                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2297                 info->irq);
2298
2299         dev->dev.driver_data = (void*) info;
2300
2301         return try_smi_init(info);
2302 }
2303
2304 static int __devexit ipmi_of_remove(struct of_device *dev)
2305 {
2306         cleanup_one_si(dev->dev.driver_data);
2307         return 0;
2308 }
2309
2310 static struct of_device_id ipmi_match[] =
2311 {
2312         { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
2313         { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2314         { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
2315         {},
2316 };
2317
2318 static struct of_platform_driver ipmi_of_platform_driver =
2319 {
2320         .name           = "ipmi",
2321         .match_table    = ipmi_match,
2322         .probe          = ipmi_of_probe,
2323         .remove         = __devexit_p(ipmi_of_remove),
2324 };
2325 #endif /* CONFIG_PPC_OF */
2326
2327
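/*
 * Issue a Get Device ID command by driving the low-level state machine
 * directly (the message handler is not attached yet) and record the decoded
 * response in smi_info->device_id.  This also serves as a probe: a hosed
 * state machine means there is probably no BMC at this address.
 */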
2328 static int try_get_dev_id(struct smi_info *smi_info)
2329 {
2330         unsigned char         msg[2];
2331         unsigned char         *resp;
2332         unsigned long         resp_len;
2333         enum si_sm_result     smi_result;
2334         int                   rv = 0;
2335
2336         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2337         if (!resp)
2338                 return -ENOMEM;
2339
2340         /* Do a Get Device ID command, since it comes back with some
2341            useful info. */
2342         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2343         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2344         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2345
2346         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2347         for (;;)
2348         {
2349                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2350                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2351                         schedule_timeout_uninterruptible(1);
2352                         smi_result = smi_info->handlers->event(
2353                                 smi_info->si_sm, 100);
2354                 }
2355                 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2356                 {
2357                         smi_result = smi_info->handlers->event(
2358                                 smi_info->si_sm, 0);
2359                 }
2360                 else
2361                         break;
2362         }
2363         if (smi_result == SI_SM_HOSED) {
2364                 /* We couldn't get the state machine to run, so whatever's at
2365                    the port is probably not an IPMI SMI interface. */
2366                 rv = -ENODEV;
2367                 goto out;
2368         }
2369
2370         /* Otherwise, we got some data. */
2371         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2372                                                   resp, IPMI_MAX_MSG_LENGTH);
2373
2374         /* Check and record info from the get device id, in case we need it. */
2375         rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2376
2377  out:
2378         kfree(resp);
2379         return rv;
2380 }
2381
2382 static int type_file_read_proc(char *page, char **start, off_t off,
2383                                int count, int *eof, void *data)
2384 {
2385         struct smi_info *smi = data;
2386
2387         return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2388 }
2389
2390 static int stat_file_read_proc(char *page, char **start, off_t off,
2391                                int count, int *eof, void *data)
2392 {
2393         char            *out = (char *) page;
2394         struct smi_info *smi = data;
2395
2396         out += sprintf(out, "interrupts_enabled:    %d\n",
2397                        smi->irq && !smi->interrupt_disabled);
2398         out += sprintf(out, "short_timeouts:        %u\n",
2399                        smi_get_stat(smi, short_timeouts));
2400         out += sprintf(out, "long_timeouts:         %u\n",
2401                        smi_get_stat(smi, long_timeouts));
2402         out += sprintf(out, "timeout_restarts:      %u\n",
2403                        smi_get_stat(smi, timeout_restarts));
2404         out += sprintf(out, "idles:                 %u\n",
2405                        smi_get_stat(smi, idles));
2406         out += sprintf(out, "interrupts:            %u\n",
2407                        smi_get_stat(smi, interrupts));
2408         out += sprintf(out, "attentions:            %u\n",
2409                        smi_get_stat(smi, attentions));
2410         out += sprintf(out, "flag_fetches:          %u\n",
2411                        smi_get_stat(smi, flag_fetches));
2412         out += sprintf(out, "hosed_count:           %u\n",
2413                        smi_get_stat(smi, hosed_count));
2414         out += sprintf(out, "complete_transactions: %u\n",
2415                        smi_get_stat(smi, complete_transactions));
2416         out += sprintf(out, "events:                %u\n",
2417                        smi_get_stat(smi, events));
2418         out += sprintf(out, "watchdog_pretimeouts:  %u\n",
2419                        smi_get_stat(smi, watchdog_pretimeouts));
2420         out += sprintf(out, "incoming_messages:     %u\n",
2421                        smi_get_stat(smi, incoming_messages));
2422
2423         return out - page;
2424 }
2425
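/*
 * Report the interface parameters as a single hotmod-style line, e.g.
 * (illustrative values only):
 *
 *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=0x20
 */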
2426 static int param_read_proc(char *page, char **start, off_t off,
2427                            int count, int *eof, void *data)
2428 {
2429         struct smi_info *smi = data;
2430
2431         return sprintf(page,
2432                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2433                        si_to_str[smi->si_type],
2434                        addr_space_to_str[smi->io.addr_type],
2435                        smi->io.addr_data,
2436                        smi->io.regspacing,
2437                        smi->io.regsize,
2438                        smi->io.regshift,
2439                        smi->irq,
2440                        smi->slave_addr);
2441 }
2442
2443 /*
2444  * oem_data_avail_to_receive_msg_avail
2445  * @info - smi_info structure with msg_flags set
2446  *
2447  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2448  * Returns 1 indicating need to re-run handle_flags().
2449  */
2450 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2451 {
2452         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2453                                 RECEIVE_MSG_AVAIL);
2454         return 1;
2455 }
2456
2457 /*
2458  * setup_dell_poweredge_oem_data_handler
2459  * @info - smi_info.device_id must be populated
2460  *
2461  * Systems that match, but have firmware version < 1.40 may assert
2462  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2463  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2464  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2465  * as RECEIVE_MSG_AVAIL instead.
2466  *
2467  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2468  * assert the OEM[012] bits, and if it did, the driver would have to
2469  * change to handle that properly, we don't actually check for the
2470  * firmware version.
2471  * Device ID = 0x20                BMC on PowerEdge 8G servers
2472  * Device Revision = 0x80
2473  * Firmware Revision1 = 0x01       BMC version 1.40
2474  * Firmware Revision2 = 0x40       BCD encoded
2475  * IPMI Version = 0x51             IPMI 1.5
2476  * Manufacturer ID = A2 02 00      Dell IANA
2477  *
2478  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2479  * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2480  *
2481  */
2482 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2483 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2484 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2485 #define DELL_IANA_MFR_ID 0x0002a2
2486 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2487 {
2488         struct ipmi_device_id *id = &smi_info->device_id;
2489         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2490                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2491                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2492                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2493                         smi_info->oem_data_avail_handler =
2494                                 oem_data_avail_to_receive_msg_avail;
2495                 }
2496                 else if (ipmi_version_major(id) < 1 ||
2497                          (ipmi_version_major(id) == 1 &&
2498                           ipmi_version_minor(id) < 5)) {
2499                         smi_info->oem_data_avail_handler =
2500                                 oem_data_avail_to_receive_msg_avail;
2501                 }
2502         }
2503 }
2504
2505 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2506 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2507 {
2508         struct ipmi_smi_msg *msg = smi_info->curr_msg;
2509
2510         /* Make it a response. */
2511         msg->rsp[0] = msg->data[0] | 4;
2512         msg->rsp[1] = msg->data[1];
2513         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2514         msg->rsp_size = 3;
2515         smi_info->curr_msg = NULL;
2516         deliver_recv_msg(smi_info, msg);
2517 }
2518
2519 /*
2520  * dell_poweredge_bt_xaction_handler
2521  * @info - smi_info.device_id must be populated
2522  *
2523  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2524  * not respond to a Get SDR command if the length of the data
2525  * requested is exactly 0x3A, which leads to command timeouts and no
2526  * data returned.  This intercepts such commands, and causes userspace
2527  * callers to try again with a different-sized buffer, which succeeds.
2528  */
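/*
 * The check below matches a Get SDR request: data[0] holds the NetFn/LUN
 * (Storage NetFn 0x0A in the upper six bits), data[1] the command (0x23),
 * and, going by the usual Get SDR request layout (reservation ID, record ID,
 * offset, bytes to read), data[7] is the requested byte count, with 0x3A
 * being the problematic value.
 */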
2529
2530 #define STORAGE_NETFN 0x0A
2531 #define STORAGE_CMD_GET_SDR 0x23
2532 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2533                                              unsigned long unused,
2534                                              void *in)
2535 {
2536         struct smi_info *smi_info = in;
2537         unsigned char *data = smi_info->curr_msg->data;
2538         unsigned int size   = smi_info->curr_msg->data_size;
2539         if (size >= 8 &&
2540             (data[0]>>2) == STORAGE_NETFN &&
2541             data[1] == STORAGE_CMD_GET_SDR &&
2542             data[7] == 0x3A) {
2543                 return_hosed_msg_badsize(smi_info);
2544                 return NOTIFY_STOP;
2545         }
2546         return NOTIFY_DONE;
2547 }
2548
2549 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2550         .notifier_call  = dell_poweredge_bt_xaction_handler,
2551 };
2552
2553 /*
2554  * setup_dell_poweredge_bt_xaction_handler
2555  * @info - smi_info.device_id must be filled in already
2556  *
2557  * Registers dell_poweredge_bt_xaction_notifier
2558  * when we know the BT workaround above is needed.
2559  */
2560 static void
2561 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2562 {
2563         struct ipmi_device_id *id = &smi_info->device_id;
2564         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2565             smi_info->si_type == SI_BT)
2566                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2567 }
2568
2569 /*
2570  * setup_oem_data_handler
2571  * @info - smi_info.device_id must be filled in already
2572  *
2573  * Fills in smi_info.oem_data_avail_handler
2574  * when we know what function to use there.
2575  */
2576
2577 static void setup_oem_data_handler(struct smi_info *smi_info)
2578 {
2579         setup_dell_poweredge_oem_data_handler(smi_info);
2580 }
2581
2582 static void setup_xaction_handlers(struct smi_info *smi_info)
2583 {
2584         setup_dell_poweredge_bt_xaction_handler(smi_info);
2585 }
2586
2587 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2588 {
2589         if (smi_info->intf) {
2590                 /* The timer and thread are only running if the
2591                    interface has been started up and registered. */
2592                 if (smi_info->thread != NULL)
2593                         kthread_stop(smi_info->thread);
2594                 del_timer_sync(&smi_info->si_timer);
2595         }
2596 }
2597
2598 static __devinitdata struct ipmi_default_vals
2599 {
2600         int type;
2601         int port;
2602 } ipmi_defaults[] =
2603 {
2604         { .type = SI_KCS, .port = 0xca2 },
2605         { .type = SI_SMIC, .port = 0xca9 },
2606         { .type = SI_BT, .port = 0xe4 },
2607         { .port = 0 }
2608 };
2609
2610 static __devinit void default_find_bmc(void)
2611 {
2612         struct smi_info *info;
2613         int             i;
2614
2615         for (i = 0; ; i++) {
2616                 if (!ipmi_defaults[i].port)
2617                         break;
2618
2619                 info = kzalloc(sizeof(*info), GFP_KERNEL);
2620                 if (!info)
2621                         return;
2622
2623 #ifdef CONFIG_PPC_MERGE
2624                 if (check_legacy_ioport(ipmi_defaults[i].port))
2625                         continue;
2626 #endif
2627
2628                 info->addr_source = NULL;
2629
2630                 info->si_type = ipmi_defaults[i].type;
2631                 info->io_setup = port_setup;
2632                 info->io.addr_data = ipmi_defaults[i].port;
2633                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2634
2635                 info->io.addr = NULL;
2636                 info->io.regspacing = DEFAULT_REGSPACING;
2637                 info->io.regsize = DEFAULT_REGSPACING;
2638                 info->io.regshift = 0;
2639
2640                 if (try_smi_init(info) == 0) {
2641                         /* Found one... */
2642                         printk(KERN_INFO "ipmi_si: Found default %s state"
2643                                " machine at %s address 0x%lx\n",
2644                                si_to_str[info->si_type],
2645                                addr_space_to_str[info->io.addr_type],
2646                                info->io.addr_data);
2647                         return;
2648                 }
2649         }
2650 }
2651
2652 static int is_new_interface(struct smi_info *info)
2653 {
2654         struct smi_info *e;
2655
2656         list_for_each_entry(e, &smi_infos, link) {
2657                 if (e->io.addr_type != info->io.addr_type)
2658                         continue;
2659                 if (e->io.addr_data == info->io.addr_data)
2660                         return 0;
2661         }
2662
2663         return 1;
2664 }
2665
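/*
 * Bring up one interface: reject duplicates, pick the KCS/SMIC/BT handlers,
 * allocate and initialize the state machine, set up the I/O region, run the
 * low-level detect and a Get Device ID probe, then register with the message
 * handler and create the type/si_stats/params proc entries.  On failure,
 * everything set up so far is torn back down.
 */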
2666 static int try_smi_init(struct smi_info *new_smi)
2667 {
2668         int rv;
2669         int i;
2670
2671         if (new_smi->addr_source) {
2672                 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2673                        " machine at %s address 0x%lx, slave address 0x%x,"
2674                        " irq %d\n",
2675                        new_smi->addr_source,
2676                        si_to_str[new_smi->si_type],
2677                        addr_space_to_str[new_smi->io.addr_type],
2678                        new_smi->io.addr_data,
2679                        new_smi->slave_addr, new_smi->irq);
2680         }
2681
2682         mutex_lock(&smi_infos_lock);
2683         if (!is_new_interface(new_smi)) {
2684                 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2685                 rv = -EBUSY;
2686                 goto out_err;
2687         }
2688
2689         /* So we know not to free it unless we have allocated one. */
2690         new_smi->intf = NULL;
2691         new_smi->si_sm = NULL;
2692         new_smi->handlers = NULL;
2693
2694         switch (new_smi->si_type) {
2695         case SI_KCS:
2696                 new_smi->handlers = &kcs_smi_handlers;
2697                 break;
2698
2699         case SI_SMIC:
2700                 new_smi->handlers = &smic_smi_handlers;
2701                 break;
2702
2703         case SI_BT:
2704                 new_smi->handlers = &bt_smi_handlers;
2705                 break;
2706
2707         default:
2708                 /* No support for anything else yet. */
2709                 rv = -EIO;
2710                 goto out_err;
2711         }
2712
2713         /* Allocate the state machine's data and initialize it. */
2714         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2715         if (!new_smi->si_sm) {
2716                 printk(KERN_ERR PFX "Could not allocate state machine memory\n");
2717                 rv = -ENOMEM;
2718                 goto out_err;
2719         }
2720         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2721                                                         &new_smi->io);
2722
2723         /* Now that we know the I/O size, we can set up the I/O. */
2724         rv = new_smi->io_setup(new_smi);
2725         if (rv) {
2726                 printk(KERN_ERR PFX "Could not set up I/O space\n");
2727                 goto out_err;
2728         }
2729
2730         spin_lock_init(&(new_smi->si_lock));
2731         spin_lock_init(&(new_smi->msg_lock));
2732
2733         /* Do low-level detection first. */
2734         if (new_smi->handlers->detect(new_smi->si_sm)) {
2735                 if (new_smi->addr_source)
2736                         printk(KERN_INFO "ipmi_si: Interface detection"
2737                                " failed\n");
2738                 rv = -ENODEV;
2739                 goto out_err;
2740         }
2741
2742         /* Attempt a get device id command.  If it fails, we probably
2743            don't have a BMC here. */
2744         rv = try_get_dev_id(new_smi);
2745         if (rv) {
2746                 if (new_smi->addr_source)
2747                         printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2748                                " at this location\n");
2749                 goto out_err;
2750         }
2751
2752         setup_oem_data_handler(new_smi);
2753         setup_xaction_handlers(new_smi);
2754
2755         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2756         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2757         new_smi->curr_msg = NULL;
2758         atomic_set(&new_smi->req_events, 0);
2759         new_smi->run_to_completion = 0;
2760         for (i = 0; i < SI_NUM_STATS; i++)
2761                 atomic_set(&new_smi->stats[i], 0);
2762
2763         new_smi->interrupt_disabled = 0;
2764         atomic_set(&new_smi->stop_operation, 0);
2765         new_smi->intf_num = smi_num;
2766         smi_num++;
2767
2768         /* Start clearing the flags before we enable interrupts or the
2769            timer to avoid racing with the timer. */
2770         start_clear_flags(new_smi);
2771         /* IRQ is defined to be set when non-zero. */
2772         if (new_smi->irq)
2773                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2774
2775         if (!new_smi->dev) {
2776                 /* If we don't already have a device from something
2777                  * else (like PCI), then register a new one. */
2778                 new_smi->pdev = platform_device_alloc("ipmi_si",
2779                                                       new_smi->intf_num);
2780                 if (!new_smi->pdev) {
2781                         rv = -ENOMEM;
2782                         printk(KERN_ERR "ipmi_si_intf:"
2783                                " Unable to allocate platform device\n");
2784                         goto out_err;
2785                 }
2786                 new_smi->dev = &new_smi->pdev->dev;
2787                 new_smi->dev->driver = &ipmi_driver;
2788
2789                 rv = platform_device_add(new_smi->pdev);
2790                 if (rv) {
2791                         printk(KERN_ERR
2792                                "ipmi_si_intf:"
2793                                " Unable to register system interface device:"
2794                                " %d\n",
2795                                rv);
2796                         goto out_err;
2797                 }
2798                 new_smi->dev_registered = 1;
2799         }
2800
2801         rv = ipmi_register_smi(&handlers,
2802                                new_smi,
2803                                &new_smi->device_id,
2804                                new_smi->dev,
2805                                "bmc",
2806                                new_smi->slave_addr);
2807         if (rv) {
2808                 printk(KERN_ERR
2809                        "ipmi_si: Unable to register device: error %d\n",
2810                        rv);
2811                 goto out_err_stop_timer;
2812         }
2813
2814         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2815                                      type_file_read_proc, NULL,
2816                                      new_smi, THIS_MODULE);
2817         if (rv) {
2818                 printk(KERN_ERR
2819                        "ipmi_si: Unable to create proc entry: %d\n",
2820                        rv);
2821                 goto out_err_stop_timer;
2822         }
2823
2824         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2825                                      stat_file_read_proc, NULL,
2826                                      new_smi, THIS_MODULE);
2827         if (rv) {
2828                 printk(KERN_ERR
2829                        "ipmi_si: Unable to create proc entry: %d\n",
2830                        rv);
2831                 goto out_err_stop_timer;
2832         }
2833
2834         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2835                                      param_read_proc, NULL,
2836                                      new_smi, THIS_MODULE);
2837         if (rv) {
2838                 printk(KERN_ERR
2839                        "ipmi_si: Unable to create proc entry: %d\n",
2840                        rv);
2841                 goto out_err_stop_timer;
2842         }
2843
2844         list_add_tail(&new_smi->link, &smi_infos);
2845
2846         mutex_unlock(&smi_infos_lock);
2847
2848         printk(KERN_INFO "IPMI %s interface initialized\n", si_to_str[new_smi->si_type]);
2849
2850         return 0;
2851
2852  out_err_stop_timer:
2853         atomic_inc(&new_smi->stop_operation);
2854         wait_for_timer_and_thread(new_smi);
2855
2856  out_err:
2857         if (new_smi->intf)
2858                 ipmi_unregister_smi(new_smi->intf);
2859
2860         if (new_smi->irq_cleanup)
2861                 new_smi->irq_cleanup(new_smi);
2862
2863         /* Wait until we know that we are out of any interrupt
2864            handlers that might have been running before we freed
2865            the interrupt. */
2866         synchronize_sched();
2867
2868         if (new_smi->si_sm) {
2869                 if (new_smi->handlers)
2870                         new_smi->handlers->cleanup(new_smi->si_sm);
2871                 kfree(new_smi->si_sm);
2872         }
2873         if (new_smi->addr_source_cleanup)
2874                 new_smi->addr_source_cleanup(new_smi);
2875         if (new_smi->io_cleanup)
2876                 new_smi->io_cleanup(new_smi);
2877
2878         if (new_smi->dev_registered)
2879                 platform_device_unregister(new_smi->pdev);
2880
2881         kfree(new_smi);
2882
2883         mutex_unlock(&smi_infos_lock);
2884
2885         return rv;
2886 }
2887
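/*
 * Module init: register the driver, then collect interfaces from each source
 * in turn - hardcoded module parameters, SMBIOS/DMI, ACPI SPMI tables, PCI
 * and the device tree - and finally fall back to the default legacy ports if
 * nothing was found and si_trydefaults is set.
 */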
2888 static __devinit int init_ipmi_si(void)
2889 {
2890         int  i;
2891         char *str;
2892         int  rv;
2893
2894         if (initialized)
2895                 return 0;
2896         initialized = 1;
2897
2898         /* Register the device drivers. */
2899         rv = driver_register(&ipmi_driver);
2900         if (rv) {
2901                 printk(KERN_ERR
2902                        "init_ipmi_si: Unable to register driver: %d\n",
2903                        rv);
2904                 return rv;
2905         }
2906
2907
2908         /* Parse out the si_type string into its components. */
2909         str = si_type_str;
2910         if (*str != '\0') {
2911                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2912                         si_type[i] = str;
2913                         str = strchr(str, ',');
2914                         if (str) {
2915                                 *str = '\0';
2916                                 str++;
2917                         } else {
2918                                 break;
2919                         }
2920                 }
2921         }
2922
2923         printk(KERN_INFO "IPMI System Interface driver.\n");
2924
2925         hardcode_find_bmc();
2926
2927 #ifdef CONFIG_DMI
2928         dmi_find_bmc();
2929 #endif
2930
2931 #ifdef CONFIG_ACPI
2932         acpi_find_bmc();
2933 #endif
2934
2935 #ifdef CONFIG_PCI
2936         rv = pci_register_driver(&ipmi_pci_driver);
2937         if (rv) {
2938                 printk(KERN_ERR
2939                        "init_ipmi_si: Unable to register PCI driver: %d\n",
2940                        rv);
2941         }
2942 #endif
2943
2944 #ifdef CONFIG_PPC_OF
2945         of_register_platform_driver(&ipmi_of_platform_driver);
2946 #endif
2947
2948         if (si_trydefaults) {
2949                 mutex_lock(&smi_infos_lock);
2950                 if (list_empty(&smi_infos)) {
2951                         /* No BMC was found, try defaults. */
2952                         mutex_unlock(&smi_infos_lock);
2953                         default_find_bmc();
2954                 } else {
2955                         mutex_unlock(&smi_infos_lock);
2956                 }
2957         }
2958
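        /* If no interfaces were found, and unload_when_empty is set,
           back out all of the registrations and fail the load so the
           module does not sit around doing nothing. */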
2959         mutex_lock(&smi_infos_lock);
2960         if (unload_when_empty && list_empty(&smi_infos)) {
2961                 mutex_unlock(&smi_infos_lock);
2962 #ifdef CONFIG_PCI
2963                 pci_unregister_driver(&ipmi_pci_driver);
2964 #endif
2965
2966 #ifdef CONFIG_PPC_OF
2967                 of_unregister_platform_driver(&ipmi_of_platform_driver);
2968 #endif
2969                 driver_unregister(&ipmi_driver);
2970                 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2971                 return -ENODEV;
2972         } else {
2973                 mutex_unlock(&smi_infos_lock);
2974                 return 0;
2975         }
2976 }
2977 module_init(init_ipmi_si);
2978
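/* Tear down a single interface: stop the timer and kernel thread, let
   any transaction in progress complete, free the interrupt, unregister
   from the IPMI core, and release the state machine, address source and
   I/O resources. */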
2979 static void cleanup_one_si(struct smi_info *to_clean)
2980 {
2981         int           rv;
2982         unsigned long flags;
2983
2984         if (!to_clean)
2985                 return;
2986
2987         list_del(&to_clean->link);
2988
2989         /* Tell the driver that we are shutting down. */
2990         atomic_inc(&to_clean->stop_operation);
2991
2992         /* Make sure the timer and thread are stopped and will not run
2993            again. */
2994         wait_for_timer_and_thread(to_clean);
2995
2996         /* Timeouts are stopped, now make sure the interrupts are off
2997            for the device.  A little tricky with locks to make sure
2998            there are no races. */
2999         spin_lock_irqsave(&to_clean->si_lock, flags);
3000         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3001                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3002                 poll(to_clean);
3003                 schedule_timeout_uninterruptible(1);
3004                 spin_lock_irqsave(&to_clean->si_lock, flags);
3005         }
3006         disable_si_irq(to_clean);
3007         spin_unlock_irqrestore(&to_clean->si_lock, flags);
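        /* The interrupt is now disabled; poll until any request that
           was already in flight has finished. */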
3008         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3009                 poll(to_clean);
3010                 schedule_timeout_uninterruptible(1);
3011         }
3012
3013         /* Clean up interrupts and make sure that everything is done. */
3014         if (to_clean->irq_cleanup)
3015                 to_clean->irq_cleanup(to_clean);
3016         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3017                 poll(to_clean);
3018                 schedule_timeout_uninterruptible(1);
3019         }
3020
3021         rv = ipmi_unregister_smi(to_clean->intf);
3022         if (rv) {
3023                 printk(KERN_ERR
3024                        "ipmi_si: Unable to unregister device: errno=%d\n",
3025                        rv);
3026         }
3027
3028         to_clean->handlers->cleanup(to_clean->si_sm);
3029
3030         kfree(to_clean->si_sm);
3031
3032         if (to_clean->addr_source_cleanup)
3033                 to_clean->addr_source_cleanup(to_clean);
3034         if (to_clean->io_cleanup)
3035                 to_clean->io_cleanup(to_clean);
3036
3037         if (to_clean->dev_registered)
3038                 platform_device_unregister(to_clean->pdev);
3039
3040         kfree(to_clean);
3041 }
3042
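/* Module exit: unregister the bus drivers, then shut down every
   remaining interface while holding smi_infos_lock. */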
3043 static __exit void cleanup_ipmi_si(void)
3044 {
3045         struct smi_info *e, *tmp_e;
3046
3047         if (!initialized)
3048                 return;
3049
3050 #ifdef CONFIG_PCI
3051         pci_unregister_driver(&ipmi_pci_driver);
3052 #endif
3053
3054 #ifdef CONFIG_PPC_OF
3055         of_unregister_platform_driver(&ipmi_of_platform_driver);
3056 #endif
3057
3058         mutex_lock(&smi_infos_lock);
3059         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3060                 cleanup_one_si(e);
3061         mutex_unlock(&smi_infos_lock);
3062
3063         driver_unregister(&ipmi_driver);
3064 }
3065 module_exit(cleanup_ipmi_si);
3066
3067 MODULE_LICENSE("GPL");
3068 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3069 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");