4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/sched.h>
38 #include <linux/poll.h>
39 #include <linux/spinlock.h>
40 #include <linux/mutex.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "39.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 static struct proc_dir_entry *proc_ipmi_root = NULL;
60 #endif /* CONFIG_PROC_FS */
62 #define MAX_EVENTS_IN_QUEUE 25
64 /* Don't let a message sit in a queue forever, always time it with at lest
65 the max message timer. This is in milliseconds. */
66 #define MAX_MSG_TIMEOUT 60000
70 * The main "user" data structure.
74 struct list_head link;
76 /* Set to "0" when the user is destroyed. */
81 /* The upper layer that handles receive messages. */
82 struct ipmi_user_hndl *handler;
85 /* The interface this user is bound to. */
88 /* Does this interface receive IPMI events? */
94 struct list_head link;
102 * This is used to form a linked lised during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.  The encoding packs the 6-bit
 * sequence-table index (0..IPMI_IPMB_NUM_SEQ-1) into bits 31..26 and a
 * 26-bit per-slot generation counter (seqid) into bits 25..0.
 *
 * All three macros must agree on those widths.  The previous version
 * masked seq with 0xff (8 bits cannot fit above a 26-bit field without
 * overflowing a 32-bit int for seq >= 32) and wrapped/extracted seqid
 * with 0x3fffff (22 bits) while storing 26 bits; use 0x3f / 0x3ffffff
 * consistently and parenthesize every macro argument.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((long)(seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
145 unsigned char medium;
146 unsigned char protocol;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
152 /* My LUN. This should generally stay the SMS LUN, but just in
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry *next;
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
172 struct kref refcount;
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
187 #define IPMI_IPMB_NUM_SEQ 64
188 #define IPMI_MAX_CHANNELS 16
191 /* What interface number are we? */
194 struct kref refcount;
196 /* The list of upper layers that are using me. seq_lock
198 struct list_head users;
200 /* Used for wake ups at startup. */
201 wait_queue_head_t waitq;
203 struct bmc_device *bmc;
206 /* This is the lower-layer's sender routine. */
207 struct ipmi_smi_handlers *handlers;
210 #ifdef CONFIG_PROC_FS
211 /* A list of proc entries for this interface. This does not
212 need a lock, only one thread creates it and only one thread
214 spinlock_t proc_entry_lock;
215 struct ipmi_proc_entry *proc_entries;
218 /* Driver-model device for the system interface. */
219 struct device *si_dev;
221 /* A table of sequence numbers for this interface. We use the
222 sequence numbers for IPMB messages that go out of the
223 interface to match them up with their responses. A routine
224 is called periodically to time the items in this list. */
226 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
229 /* Messages that were delayed for some reason (out of memory,
230 for instance), will go in here to be processed later in a
231 periodic timer interrupt. */
232 spinlock_t waiting_msgs_lock;
233 struct list_head waiting_msgs;
235 /* The list of command receivers that are registered for commands
236 on this interface. */
237 struct mutex cmd_rcvrs_mutex;
238 struct list_head cmd_rcvrs;
240 /* Events that were queues because no one was there to receive
242 spinlock_t events_lock; /* For dealing with event stuff. */
243 struct list_head waiting_events;
244 unsigned int waiting_events_count; /* How many events in queue? */
246 /* The event receiver for my BMC, only really used at panic
247 shutdown as a place to store this. */
248 unsigned char event_receiver;
249 unsigned char event_receiver_lun;
250 unsigned char local_sel_device;
251 unsigned char local_event_generator;
253 /* A cheap hack, if this is non-null and a message to an
254 interface comes in with a NULL user, call this routine with
255 it. Note that the message will still be freed by the
256 caller. This only works on the system interface. */
257 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
259 /* When we are scanning the channels for an SMI, this will
260 tell which channel we are scanning. */
263 /* Channel information */
264 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
267 struct proc_dir_entry *proc_dir;
268 char proc_dir_name[10];
270 spinlock_t counter_lock; /* For making counters atomic. */
272 /* Commands we got that were invalid. */
273 unsigned int sent_invalid_commands;
275 /* Commands we sent to the MC. */
276 unsigned int sent_local_commands;
277 /* Responses from the MC that were delivered to a user. */
278 unsigned int handled_local_responses;
279 /* Responses from the MC that were not delivered to a user. */
280 unsigned int unhandled_local_responses;
282 /* Commands we sent out to the IPMB bus. */
283 unsigned int sent_ipmb_commands;
284 /* Commands sent on the IPMB that had errors on the SEND CMD */
285 unsigned int sent_ipmb_command_errs;
286 /* Each retransmit increments this count. */
287 unsigned int retransmitted_ipmb_commands;
288 /* When a message times out (runs out of retransmits) this is
290 unsigned int timed_out_ipmb_commands;
292 /* This is like above, but for broadcasts. Broadcasts are
293 *not* included in the above count (they are expected to
295 unsigned int timed_out_ipmb_broadcasts;
297 /* Responses I have sent to the IPMB bus. */
298 unsigned int sent_ipmb_responses;
300 /* The response was delivered to the user. */
301 unsigned int handled_ipmb_responses;
302 /* The response had invalid data in it. */
303 unsigned int invalid_ipmb_responses;
304 /* The response didn't have anyone waiting for it. */
305 unsigned int unhandled_ipmb_responses;
307 /* Commands we sent out to the IPMB bus. */
308 unsigned int sent_lan_commands;
309 /* Commands sent on the IPMB that had errors on the SEND CMD */
310 unsigned int sent_lan_command_errs;
311 /* Each retransmit increments this count. */
312 unsigned int retransmitted_lan_commands;
313 /* When a message times out (runs out of retransmits) this is
315 unsigned int timed_out_lan_commands;
317 /* Responses I have sent to the IPMB bus. */
318 unsigned int sent_lan_responses;
320 /* The response was delivered to the user. */
321 unsigned int handled_lan_responses;
322 /* The response had invalid data in it. */
323 unsigned int invalid_lan_responses;
324 /* The response didn't have anyone waiting for it. */
325 unsigned int unhandled_lan_responses;
327 /* The command was delivered to the user. */
328 unsigned int handled_commands;
329 /* The command had invalid data in it. */
330 unsigned int invalid_commands;
331 /* The command didn't have anyone waiting for it. */
332 unsigned int unhandled_commands;
334 /* Invalid data in an event. */
335 unsigned int invalid_events;
336 /* Events that were received with the proper format. */
339 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
341 /* Used to mark an interface entry that cannot be used but is not a
342 * free entry, either, primarily used at creation and deletion time so
343 * a slot doesn't get reused too quickly. */
344 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
345 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
346 || (i == IPMI_INVALID_INTERFACE_ENTRY))
349 * The driver model view of the IPMI messaging driver.
351 static struct device_driver ipmidriver = {
353 .bus = &platform_bus_type
355 static DEFINE_MUTEX(ipmidriver_mutex);
357 #define MAX_IPMI_INTERFACES 4
358 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
360 /* Directly protects the ipmi_interfaces data structure. */
361 static DEFINE_SPINLOCK(interfaces_lock);
363 /* List of watchers that want to know when smi's are added and
365 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
366 static DECLARE_RWSEM(smi_watchers_sem);
369 static void free_recv_msg_list(struct list_head *q)
371 struct ipmi_recv_msg *msg, *msg2;
373 list_for_each_entry_safe(msg, msg2, q, link) {
374 list_del(&msg->link);
375 ipmi_free_recv_msg(msg);
379 static void free_smi_msg_list(struct list_head *q)
381 struct ipmi_smi_msg *msg, *msg2;
383 list_for_each_entry_safe(msg, msg2, q, link) {
384 list_del(&msg->link);
385 ipmi_free_smi_msg(msg);
/*
 * Free everything attached to an interface before it is destroyed:
 * pending SMI messages, queued events, all registered command
 * receivers, and any receive messages still held in the sequence table.
 *
 * NOTE(review): this extract is line-sampled; the opening brace, the
 * `int i;` declaration, the synchronize_rcu() call and the kfree() of
 * each detached rcvr appear to be missing from view — confirm against
 * the full file before relying on this listing.
 */
389 static void clean_up_interface_data(ipmi_smi_t intf)
392 struct cmd_rcvr *rcvr, *rcvr2;
393 struct list_head list;
395 free_smi_msg_list(&intf->waiting_msgs);
396 free_recv_msg_list(&intf->waiting_events);
398 /* Wholesale remove all the entries from the list in the
399 * interface and wait for RCU to know that none are in use. */
400 mutex_lock(&intf->cmd_rcvrs_mutex);
/* Splice the whole cmd_rcvrs list onto the local head, then unlink
   the interface's head, so readers either see all or none. */
401 list_add_rcu(&list, &intf->cmd_rcvrs);
402 list_del_rcu(&intf->cmd_rcvrs);
403 mutex_unlock(&intf->cmd_rcvrs_mutex);
406 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
/* Free any receive message still parked in an in-use sequence slot. */
409 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
410 if ((intf->seq_table[i].inuse)
411 && (intf->seq_table[i].recv_msg))
413 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
/*
 * kref release callback for an interface: runs when the last reference
 * (see kref_put(&intf->refcount, intf_free) callers) is dropped.
 * NOTE(review): the final kfree(intf) is missing from this sampled view.
 */
418 static void intf_free(struct kref *ref)
420 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
422 clean_up_interface_data(intf);
/*
 * Register a watcher and immediately announce every currently valid
 * interface to it via ->new_smi().  The interfaces spinlock is dropped
 * around the callback (it may sleep) and re-taken afterwards.
 *
 * NOTE(review): line-sampled — local declarations (i, flags), the
 * `continue` for invalid slots, and the return value are not visible.
 */
426 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
431 down_write(&smi_watchers_sem);
432 list_add(&(watcher->link), &smi_watchers);
433 up_write(&smi_watchers_sem);
434 spin_lock_irqsave(&interfaces_lock, flags);
435 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
436 ipmi_smi_t intf = ipmi_interfaces[i];
437 if (IPMI_INVALID_INTERFACE(intf))
/* Drop the lock across the callback, which may sleep. */
439 spin_unlock_irqrestore(&interfaces_lock, flags);
440 watcher->new_smi(i, intf->si_dev);
441 spin_lock_irqsave(&interfaces_lock, flags);
443 spin_unlock_irqrestore(&interfaces_lock, flags);
/*
 * Remove a previously registered watcher from the global list, under
 * the writers' side of smi_watchers_sem.
 * NOTE(review): braces and the return statement are missing from view.
 */
447 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
449 down_write(&smi_watchers_sem);
450 list_del(&(watcher->link));
451 up_write(&smi_watchers_sem);
/*
 * Notify every registered watcher that interface @i (device @dev)
 * exists, taking a module reference around each callback so the
 * watcher's module cannot unload mid-call.
 * NOTE(review): the actual w->new_smi(i, dev) invocation between the
 * try_module_get/module_put pair is missing from this sampled view.
 */
456 call_smi_watchers(int i, struct device *dev)
458 struct ipmi_smi_watcher *w;
460 down_read(&smi_watchers_sem);
461 list_for_each_entry(w, &smi_watchers, link) {
462 if (try_module_get(w->owner)) {
464 module_put(w->owner);
467 up_read(&smi_watchers_sem);
/*
 * Compare two IPMI addresses for equality.  Addresses match only if
 * their type and channel agree, plus the type-specific fields:
 * SI addresses compare LUN; IPMB (and IPMB broadcast) compare slave
 * address and LUN; LAN compares both SWIDs, session handle and LUN.
 * NOTE(review): the `return 0;` lines for the early mismatch cases and
 * the function's braces are missing from this sampled view.
 */
471 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
473 if (addr1->addr_type != addr2->addr_type)
476 if (addr1->channel != addr2->channel)
479 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
480 struct ipmi_system_interface_addr *smi_addr1
481 = (struct ipmi_system_interface_addr *) addr1;
482 struct ipmi_system_interface_addr *smi_addr2
483 = (struct ipmi_system_interface_addr *) addr2;
484 return (smi_addr1->lun == smi_addr2->lun);
487 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
488 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
490 struct ipmi_ipmb_addr *ipmb_addr1
491 = (struct ipmi_ipmb_addr *) addr1;
492 struct ipmi_ipmb_addr *ipmb_addr2
493 = (struct ipmi_ipmb_addr *) addr2;
495 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
496 && (ipmb_addr1->lun == ipmb_addr2->lun));
499 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
500 struct ipmi_lan_addr *lan_addr1
501 = (struct ipmi_lan_addr *) addr1;
502 struct ipmi_lan_addr *lan_addr2
503 = (struct ipmi_lan_addr *) addr2;
505 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
506 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
507 && (lan_addr1->session_handle
508 == lan_addr2->session_handle)
509 && (lan_addr1->lun == lan_addr2->lun));
/*
 * Validate a user-supplied address: @len must cover the structure for
 * the claimed addr_type, and the channel must be in range for that
 * type (BMC channel only for system-interface addresses; a normal
 * channel 0..IPMI_MAX_CHANNELS-1 otherwise).
 *
 * NOTE(review): the error `return -EINVAL;`/success returns are missing
 * from this sampled view.  Also note `len` is a signed int compared
 * against sizeof(...) (a size_t): a negative len converts to a huge
 * unsigned value and would pass the `<` checks — verify callers never
 * pass a negative length.
 */
515 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
517 if (len < sizeof(struct ipmi_system_interface_addr)) {
521 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
522 if (addr->channel != IPMI_BMC_CHANNEL)
527 if ((addr->channel == IPMI_BMC_CHANNEL)
528 || (addr->channel >= IPMI_MAX_CHANNELS)
529 || (addr->channel < 0))
532 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
533 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
535 if (len < sizeof(struct ipmi_ipmb_addr)) {
541 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
542 if (len < sizeof(struct ipmi_lan_addr)) {
/*
 * Return the size in bytes of the address structure corresponding to
 * @addr_type (system-interface, IPMB/IPMB-broadcast, or LAN).
 * NOTE(review): the fallback return for unknown types is missing from
 * this sampled view.
 */
551 unsigned int ipmi_addr_length(int addr_type)
553 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
554 return sizeof(struct ipmi_system_interface_addr);
556 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
557 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
559 return sizeof(struct ipmi_ipmb_addr);
562 if (addr_type == IPMI_LAN_ADDR_TYPE)
563 return sizeof(struct ipmi_lan_addr);
/*
 * Hand a completed receive message to its destination.  Messages with
 * no owning user go to the interface's null_user_handler if one is set
 * (counted as handled) or are dropped (counted as unhandled), then
 * freed here; messages with a user are passed to the user's
 * ipmi_recv_hndl callback, which takes ownership of @msg.
 *
 * NOTE(review): the `if (!msg->user)`/else branch structure, local
 * `flags` declaration and braces are missing from this sampled view —
 * the counter/free lines below belong to the no-user path and the last
 * two lines to the normal-user path.
 */
568 static void deliver_response(struct ipmi_recv_msg *msg)
/* For the no-user path user_msg_data holds the interface pointer. */
571 ipmi_smi_t intf = msg->user_msg_data;
574 /* Special handling for NULL users. */
575 if (intf->null_user_handler) {
576 intf->null_user_handler(intf, msg);
577 spin_lock_irqsave(&intf->counter_lock, flags);
578 intf->handled_local_responses++;
579 spin_unlock_irqrestore(&intf->counter_lock, flags);
581 /* No handler, so give up. */
582 spin_lock_irqsave(&intf->counter_lock, flags);
583 intf->unhandled_local_responses++;
584 spin_unlock_irqrestore(&intf->counter_lock, flags);
586 ipmi_free_recv_msg(msg);
588 ipmi_user_t user = msg->user;
589 user->handler->ipmi_recv_hndl(msg, user->handler_data);
593 /* Find the next sequence number not being used and add the given
594 message with the given timeout to the sequence table. This must be
595 called with the interface's seq_lock held. */
/*
 * NOTE(review): line-sampled — the remaining parameters (retries,
 * broadcast, the seq/seqid output pointers), locals, the -EAGAIN path
 * when no slot is free, and the closing braces are not visible here.
 */
596 static int intf_next_seq(ipmi_smi_t intf,
597 struct ipmi_recv_msg *recv_msg,
598 unsigned long timeout,
/* Scan at most one full lap of the table starting at curr_seq. */
607 for (i = intf->curr_seq;
608 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
609 i = (i+1)%IPMI_IPMB_NUM_SEQ)
611 if (!intf->seq_table[i].inuse)
615 if (!intf->seq_table[i].inuse) {
616 intf->seq_table[i].recv_msg = recv_msg;
618 /* Start with the maximum timeout, when the send response
619 comes in we will start the real timer. */
620 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
621 intf->seq_table[i].orig_timeout = timeout;
622 intf->seq_table[i].retries_left = retries;
623 intf->seq_table[i].broadcast = broadcast;
624 intf->seq_table[i].inuse = 1;
/* Bump the slot's generation counter so stale msgids don't match. */
625 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
627 *seqid = intf->seq_table[i].seqid;
628 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
636 /* Return the receive message for the given sequence number and
637 release the sequence number so it can be reused. Some other data
638 is passed in to be sure the message matches up correctly (to help
639 guard against message coming in after their timeout and the
640 sequence number being reused). */
/*
 * NOTE(review): line-sampled — the seq/channel/cmd/netfn parameters,
 * the rv local, the *recv_msg assignment and returns are not visible.
 */
641 static int intf_find_seq(ipmi_smi_t intf,
646 struct ipmi_addr *addr,
647 struct ipmi_recv_msg **recv_msg)
652 if (seq >= IPMI_IPMB_NUM_SEQ)
655 spin_lock_irqsave(&(intf->seq_lock), flags);
656 if (intf->seq_table[seq].inuse) {
657 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
/* Only release the slot if the reply really matches the request. */
659 if ((msg->addr.channel == channel)
660 && (msg->msg.cmd == cmd)
661 && (msg->msg.netfn == netfn)
662 && (ipmi_addr_equal(addr, &(msg->addr))))
665 intf->seq_table[seq].inuse = 0;
669 spin_unlock_irqrestore(&(intf->seq_lock), flags);
675 /* Start the timer for a specific sequence table entry. */
/*
 * Switches the entry from the provisional MAX_MSG_TIMEOUT set by
 * intf_next_seq() to its real (orig_timeout) value, but only if the
 * slot is still in use and its generation (seqid) matches the msgid.
 * NOTE(review): the msgid parameter, seq/seqid/flags locals and the
 * return are missing from this sampled view.
 */
676 static int intf_start_seq_timer(ipmi_smi_t intf,
685 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
687 spin_lock_irqsave(&(intf->seq_lock), flags);
688 /* We do this verification because the user can be deleted
689 while a message is outstanding. */
690 if ((intf->seq_table[seq].inuse)
691 && (intf->seq_table[seq].seqid == seqid))
693 struct seq_table *ent = &(intf->seq_table[seq]);
694 ent->timeout = ent->orig_timeout;
697 spin_unlock_irqrestore(&(intf->seq_lock), flags);
702 /* Got an error for the send message for a specific sequence number. */
/*
 * Release the matching sequence-table entry and synthesize an error
 * response (completion code @err) delivered to the original requester.
 * The netfn |= 1 turns the stored request netfn into its response pair.
 * NOTE(review): the msgid/err parameters, the slot-release statements
 * inside the if, the `if (msg)` guard before delivery, and the return
 * are missing from this sampled view.
 */
703 static int intf_err_seq(ipmi_smi_t intf,
711 struct ipmi_recv_msg *msg = NULL;
714 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
716 spin_lock_irqsave(&(intf->seq_lock), flags);
717 /* We do this verification because the user can be deleted
718 while a message is outstanding. */
719 if ((intf->seq_table[seq].inuse)
720 && (intf->seq_table[seq].seqid == seqid))
722 struct seq_table *ent = &(intf->seq_table[seq]);
728 spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Build a one-byte response carrying the error completion code. */
731 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
732 msg->msg_data[0] = err;
733 msg->msg.netfn |= 1; /* Convert to a response. */
734 msg->msg.data_len = 1;
735 msg->msg.data = msg->msg_data;
736 deliver_response(msg);
/*
 * Create a user handle bound to interface @if_num with the given
 * receive handler, taking a reference on the interface and on the
 * lower layer's module.  On failure the allocated user and the
 * interface reference are released (error paths mostly sampled out).
 *
 * NOTE(review): genuine-looking defect — `intf = ipmi_interfaces[if_num]`
 * is evaluated BEFORE the `if_num >= MAX_IPMI_INTERFACES` bounds check,
 * so an out-of-range if_num reads past the array before being rejected.
 * The range check should precede the array access; confirm against the
 * full file and fix there.
 */
743 int ipmi_create_user(unsigned int if_num,
744 struct ipmi_user_hndl *handler,
749 ipmi_user_t new_user;
753 /* There is no module usecount here, because it's not
754 required. Since this can only be used by and called from
755 other modules, they will implicitly use this module, and
756 thus this can't be removed unless the other modules are
762 /* Make sure the driver is actually initialized, this handles
763 problems with initialization order. */
765 rv = ipmi_init_msghandler();
769 /* The init code doesn't return an error if it was turned
770 off, but it won't initialize. Check that. */
775 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
779 spin_lock_irqsave(&interfaces_lock, flags);
780 intf = ipmi_interfaces[if_num];
781 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
782 spin_unlock_irqrestore(&interfaces_lock, flags);
787 /* Note that each existing user holds a refcount to the interface. */
788 kref_get(&intf->refcount);
789 spin_unlock_irqrestore(&interfaces_lock, flags);
791 kref_init(&new_user->refcount);
792 new_user->handler = handler;
793 new_user->handler_data = handler_data;
794 new_user->intf = intf;
795 new_user->gets_events = 0;
/* Pin the lower-layer module (and its optional usecount hook). */
797 if (!try_module_get(intf->handlers->owner)) {
802 if (intf->handlers->inc_usecount) {
803 rv = intf->handlers->inc_usecount(intf->send_info);
805 module_put(intf->handlers->owner);
/* Publish the user; seq_lock also protects the RCU users list here. */
811 spin_lock_irqsave(&intf->seq_lock, flags);
812 list_add_rcu(&new_user->link, &intf->users);
813 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Error path: drop the interface reference taken above. */
818 kref_put(&intf->refcount, intf_free);
/*
 * kref release callback for a user; runs when the last reference is
 * dropped.  NOTE(review): the kfree(user) body line is missing from
 * this sampled view.
 */
824 static void free_user(struct kref *ref)
826 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
/*
 * Tear down a user: unlink it from the interface's user list and
 * sequence table, unregister its command receivers (collected on a
 * private `rcvrs` chain and freed after the RCU grace period), drop the
 * lower-layer module/usecount references, and release the interface and
 * user refcounts.
 * NOTE(review): line-sampled — locals (i, flags), user->valid clearing,
 * the rcvrs chaining/synchronize_rcu()/kfree loop, and the return are
 * not visible here.
 */
830 int ipmi_destroy_user(ipmi_user_t user)
832 ipmi_smi_t intf = user->intf;
835 struct cmd_rcvr *rcvr;
836 struct cmd_rcvr *rcvrs = NULL;
840 /* Remove the user from the interface's sequence table. */
841 spin_lock_irqsave(&intf->seq_lock, flags);
842 list_del_rcu(&user->link);
844 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
845 if (intf->seq_table[i].inuse
846 && (intf->seq_table[i].recv_msg->user == user))
848 intf->seq_table[i].inuse = 0;
851 spin_unlock_irqrestore(&intf->seq_lock, flags);
854 * Remove the user from the command receiver's table. First
855 * we build a list of everything (not using the standard link,
856 * since other things may be using it till we do
857 * synchronize_rcu()) then free everything in that list.
859 mutex_lock(&intf->cmd_rcvrs_mutex);
860 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
861 if (rcvr->user == user) {
862 list_del_rcu(&rcvr->link);
867 mutex_unlock(&intf->cmd_rcvrs_mutex);
/* Release the lower layer's module and optional usecount. */
875 module_put(intf->handlers->owner);
876 if (intf->handlers->dec_usecount)
877 intf->handlers->dec_usecount(intf->send_info);
879 kref_put(&intf->refcount, intf_free);
881 kref_put(&user->refcount, free_user);
886 void ipmi_get_version(ipmi_user_t user,
887 unsigned char *major,
888 unsigned char *minor)
890 *major = ipmi_version_major(&user->intf->bmc->id);
891 *minor = ipmi_version_minor(&user->intf->bmc->id);
/*
 * Set the local IPMB slave address used on @channel.
 * NOTE(review): the -EINVAL/0 return statements are missing from this
 * sampled view.
 */
894 int ipmi_set_my_address(ipmi_user_t user,
895 unsigned int channel,
896 unsigned char address)
898 if (channel >= IPMI_MAX_CHANNELS)
900 user->intf->channels[channel].address = address;
/*
 * Read back the local IPMB slave address for @channel into *address.
 * NOTE(review): the -EINVAL/0 return statements are missing from this
 * sampled view.
 */
904 int ipmi_get_my_address(ipmi_user_t user,
905 unsigned int channel,
906 unsigned char *address)
908 if (channel >= IPMI_MAX_CHANNELS)
910 *address = user->intf->channels[channel].address;
/*
 * Set the local LUN for @channel; only the low two bits are kept
 * (LUNs are 2 bits wide).
 * NOTE(review): the LUN parameter line and the -EINVAL/0 returns are
 * missing from this sampled view.
 */
914 int ipmi_set_my_LUN(ipmi_user_t user,
915 unsigned int channel,
918 if (channel >= IPMI_MAX_CHANNELS)
920 user->intf->channels[channel].lun = LUN & 0x3;
/*
 * Read back the local LUN for @channel into *address.
 * NOTE(review): the -EINVAL/0 return statements are missing from this
 * sampled view.
 */
924 int ipmi_get_my_LUN(ipmi_user_t user,
925 unsigned int channel,
926 unsigned char *address)
928 if (channel >= IPMI_MAX_CHANNELS)
930 *address = user->intf->channels[channel].lun;
/*
 * Enable or disable event delivery for @user.  When enabling, any
 * events queued on the interface are moved to a local list and
 * delivered immediately, all under events_lock so ordering with newly
 * arriving events is preserved.
 * NOTE(review): line-sampled — the `flags` local, the `if (val)` guard
 * around the drain, the msg->user assignment paired with the
 * kref_get(), and the return are not visible here.
 */
934 int ipmi_set_gets_events(ipmi_user_t user, int val)
937 ipmi_smi_t intf = user->intf;
938 struct ipmi_recv_msg *msg, *msg2;
939 struct list_head msgs;
941 INIT_LIST_HEAD(&msgs);
943 spin_lock_irqsave(&intf->events_lock, flags);
944 user->gets_events = val;
947 /* Deliver any queued events. */
948 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
949 list_move_tail(&msg->link, &msgs);
950 intf->waiting_events_count = 0;
953 /* Hold the events lock while doing this to preserve order. */
954 list_for_each_entry_safe(msg, msg2, &msgs, link) {
/* Each delivered message carries a reference on the user. */
956 kref_get(&user->refcount);
957 deliver_response(msg);
960 spin_unlock_irqrestore(&intf->events_lock, flags);
/*
 * Find the registered command receiver matching @netfn/@cmd on channel
 * @chan (chans is a per-channel bitmask).  Caller must be in an RCU
 * read section or hold cmd_rcvrs_mutex.
 * NOTE(review): the netfn/cmd/chan parameters and the return
 * statements are missing from this sampled view.
 */
965 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
970 struct cmd_rcvr *rcvr;
972 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
973 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
974 && (rcvr->chans & (1 << chan)))
/*
 * Check whether @netfn/@cmd can still be claimed on the channel mask
 * @chans, i.e. no existing receiver overlaps any of those channels.
 * NOTE(review): the cmd/chans parameters and the 0/1 return statements
 * are missing from this sampled view.
 */
980 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
985 struct cmd_rcvr *rcvr;
987 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
988 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
989 && (rcvr->chans & chans))
/*
 * Register @user to receive incoming commands matching netfn/cmd on
 * the channels in @chans.  Fails if any of those channels already has
 * a receiver for that netfn/cmd (exclusivity check under the mutex).
 * NOTE(review): line-sampled — the netfn/cmd/chans parameters, the
 * kmalloc failure check, rcvr->cmd/rcvr->user assignments, the -EBUSY
 * path, kfree on failure and the return are not visible here.
 */
995 int ipmi_register_for_cmd(ipmi_user_t user,
1000 ipmi_smi_t intf = user->intf;
1001 struct cmd_rcvr *rcvr;
1005 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1009 rcvr->netfn = netfn;
1010 rcvr->chans = chans;
1013 mutex_lock(&intf->cmd_rcvrs_mutex);
1014 /* Make sure the command/netfn is not already registered. */
1015 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1020 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1023 mutex_unlock(&intf->cmd_rcvrs_mutex);
/*
 * Drop @user's registration for netfn/cmd on the channels in @chans.
 * Walks each requested channel, clears those bits from the matching
 * receiver, and fully unlinks receivers whose channel mask becomes
 * empty (freed after the RCU grace period via the `rcvrs` chain).
 * Returns -ENOENT if nothing matched.
 * NOTE(review): line-sampled — the cmd/chans parameters, `continue`
 * guards, the rcvrs chaining, synchronize_rcu()/kfree loop and the
 * return are not visible here.
 */
1030 int ipmi_unregister_for_cmd(ipmi_user_t user,
1031 unsigned char netfn,
1035 ipmi_smi_t intf = user->intf;
1036 struct cmd_rcvr *rcvr;
1037 struct cmd_rcvr *rcvrs = NULL;
1038 int i, rv = -ENOENT;
1040 mutex_lock(&intf->cmd_rcvrs_mutex);
1041 for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
1042 if (((1 << i) & chans) == 0)
1044 rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
1047 if (rcvr->user == user) {
1049 rcvr->chans &= ~chans;
1050 if (rcvr->chans == 0) {
1051 list_del_rcu(&rcvr->link);
1057 mutex_unlock(&intf->cmd_rcvrs_mutex);
1067 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1069 ipmi_smi_t intf = user->intf;
1070 intf->handlers->set_run_to_completion(intf->send_info, val);
/*
 * Compute the IPMB two's-complement checksum over @size bytes at
 * @data: the returned byte is chosen so that the modulo-256 sum of the
 * covered bytes plus the checksum equals zero (per the IPMI spec).
 * Returns 0 for size <= 0.
 *
 * (The sampled original was missing the accumulation and return
 * statements; this restores the complete, well-formed function.)
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
/*
 * Build a complete Send Message request carrying an IPMB-framed
 * message: [netfn|lun, Send Msg cmd, channel, (optional broadcast 0),
 * rsSA, rsNetFn/rsLUN, header checksum, rqSA, rqSeq/rqLUN, cmd, data,
 * trailing checksum].  `i` is the broadcast offset (presumably 1 when
 * a leading broadcast byte is inserted, else 0 — the lines declaring
 * it, the msgid/broadcast parameters, the braces and the data_len
 * argument of the memcpy are missing from this sampled view).
 */
1084 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1085 struct kernel_ipmi_msg *msg,
1086 struct ipmi_ipmb_addr *ipmb_addr,
1088 unsigned char ipmb_seq,
1090 unsigned char source_address,
1091 unsigned char source_lun)
1095 /* Format the IPMB header data. */
1096 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1097 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1098 smi_msg->data[2] = ipmb_addr->channel;
/* Broadcast messages carry a leading zero byte before the slave addr. */
1100 smi_msg->data[3] = 0;
1101 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1102 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
/* Checksum covers rsSA and rsNetFn/rsLUN (2 bytes). */
1103 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1104 smi_msg->data[i+6] = source_address;
1105 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1106 smi_msg->data[i+8] = msg->cmd;
1108 /* Now tack on the data to the message. */
1109 if (msg->data_len > 0)
1110 memcpy(&(smi_msg->data[i+9]), msg->data,
1112 smi_msg->data_size = msg->data_len + 9;
1114 /* Now calculate the checksum and tack it on. */
1115 smi_msg->data[i+smi_msg->data_size]
1116 = ipmb_checksum(&(smi_msg->data[i+6]),
1117 smi_msg->data_size-6);
1119 /* Add on the checksum size and the offset from the
1121 smi_msg->data_size += 1 + i;
1123 smi_msg->msgid = msgid;
/*
 * Build a complete Send Message request carrying a LAN-framed message:
 * [netfn|lun, Send Msg cmd, channel, session handle, remote SWID,
 * netfn/lun, header checksum, local SWID, seq/lun, cmd, data, trailing
 * checksum].  Mirrors format_ipmb_msg() but with SWIDs/session handle
 * and no broadcast offset.
 * NOTE(review): the msgid parameter line, braces and the data_len
 * argument of the memcpy are missing from this sampled view.
 */
1126 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1127 struct kernel_ipmi_msg *msg,
1128 struct ipmi_lan_addr *lan_addr,
1130 unsigned char ipmb_seq,
1131 unsigned char source_lun)
1133 /* Format the IPMB header data. */
1134 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1135 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1136 smi_msg->data[2] = lan_addr->channel;
1137 smi_msg->data[3] = lan_addr->session_handle;
1138 smi_msg->data[4] = lan_addr->remote_SWID;
1139 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
/* Checksum covers remote SWID and netfn/lun (2 bytes). */
1140 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1141 smi_msg->data[7] = lan_addr->local_SWID;
1142 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1143 smi_msg->data[9] = msg->cmd;
1145 /* Now tack on the data to the message. */
1146 if (msg->data_len > 0)
1147 memcpy(&(smi_msg->data[10]), msg->data,
1149 smi_msg->data_size = msg->data_len + 10;
1151 /* Now calculate the checksum and tack it on. */
1152 smi_msg->data[smi_msg->data_size]
1153 = ipmb_checksum(&(smi_msg->data[7]),
1154 smi_msg->data_size-7);
1156 /* Add on the checksum size and the offset from the
1158 smi_msg->data_size += 1;
1160 smi_msg->msgid = msgid;
1163 /* Separate from ipmi_request so that the user does not have to be
1164 supplied in certain circumstances (mainly at panic time). If
1165 messages are supplied, they will be freed, even if an error
1167 static int i_ipmi_request(ipmi_user_t user,
1169 struct ipmi_addr *addr,
1171 struct kernel_ipmi_msg *msg,
1172 void *user_msg_data,
1174 struct ipmi_recv_msg *supplied_recv,
1176 unsigned char source_address,
1177 unsigned char source_lun,
1179 unsigned int retry_time_ms)
1182 struct ipmi_smi_msg *smi_msg;
1183 struct ipmi_recv_msg *recv_msg;
1184 unsigned long flags;
1187 if (supplied_recv) {
1188 recv_msg = supplied_recv;
1190 recv_msg = ipmi_alloc_recv_msg();
1191 if (recv_msg == NULL) {
1195 recv_msg->user_msg_data = user_msg_data;
1198 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1200 smi_msg = ipmi_alloc_smi_msg();
1201 if (smi_msg == NULL) {
1202 ipmi_free_recv_msg(recv_msg);
1207 recv_msg->user = user;
1209 kref_get(&user->refcount);
1210 recv_msg->msgid = msgid;
1211 /* Store the message to send in the receive message so timeout
1212 responses can get the proper response data. */
1213 recv_msg->msg = *msg;
1215 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1216 struct ipmi_system_interface_addr *smi_addr;
1218 if (msg->netfn & 1) {
1219 /* Responses are not allowed to the SMI. */
1224 smi_addr = (struct ipmi_system_interface_addr *) addr;
1225 if (smi_addr->lun > 3) {
1226 spin_lock_irqsave(&intf->counter_lock, flags);
1227 intf->sent_invalid_commands++;
1228 spin_unlock_irqrestore(&intf->counter_lock, flags);
1233 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1235 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1236 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1237 || (msg->cmd == IPMI_GET_MSG_CMD)
1238 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1240 /* We don't let the user do these, since we manage
1241 the sequence numbers. */
1242 spin_lock_irqsave(&intf->counter_lock, flags);
1243 intf->sent_invalid_commands++;
1244 spin_unlock_irqrestore(&intf->counter_lock, flags);
1249 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1250 spin_lock_irqsave(&intf->counter_lock, flags);
1251 intf->sent_invalid_commands++;
1252 spin_unlock_irqrestore(&intf->counter_lock, flags);
1257 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1258 smi_msg->data[1] = msg->cmd;
1259 smi_msg->msgid = msgid;
1260 smi_msg->user_data = recv_msg;
1261 if (msg->data_len > 0)
1262 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1263 smi_msg->data_size = msg->data_len + 2;
1264 spin_lock_irqsave(&intf->counter_lock, flags);
1265 intf->sent_local_commands++;
1266 spin_unlock_irqrestore(&intf->counter_lock, flags);
1267 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1268 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1270 struct ipmi_ipmb_addr *ipmb_addr;
1271 unsigned char ipmb_seq;
1275 if (addr->channel >= IPMI_MAX_CHANNELS) {
1276 spin_lock_irqsave(&intf->counter_lock, flags);
1277 intf->sent_invalid_commands++;
1278 spin_unlock_irqrestore(&intf->counter_lock, flags);
1283 if (intf->channels[addr->channel].medium
1284 != IPMI_CHANNEL_MEDIUM_IPMB)
1286 spin_lock_irqsave(&intf->counter_lock, flags);
1287 intf->sent_invalid_commands++;
1288 spin_unlock_irqrestore(&intf->counter_lock, flags);
1294 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1295 retries = 0; /* Don't retry broadcasts. */
1299 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1300 /* Broadcasts add a zero at the beginning of the
1301 message, but otherwise is the same as an IPMB
1303 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1308 /* Default to 1 second retries. */
1309 if (retry_time_ms == 0)
1310 retry_time_ms = 1000;
1312 /* 9 for the header and 1 for the checksum, plus
1313 possibly one for the broadcast. */
1314 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1315 spin_lock_irqsave(&intf->counter_lock, flags);
1316 intf->sent_invalid_commands++;
1317 spin_unlock_irqrestore(&intf->counter_lock, flags);
1322 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1323 if (ipmb_addr->lun > 3) {
1324 spin_lock_irqsave(&intf->counter_lock, flags);
1325 intf->sent_invalid_commands++;
1326 spin_unlock_irqrestore(&intf->counter_lock, flags);
1331 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1333 if (recv_msg->msg.netfn & 0x1) {
1334 /* It's a response, so use the user's sequence
1336 spin_lock_irqsave(&intf->counter_lock, flags);
1337 intf->sent_ipmb_responses++;
1338 spin_unlock_irqrestore(&intf->counter_lock, flags);
1339 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1341 source_address, source_lun);
1343 /* Save the receive message so we can use it
1344 to deliver the response. */
1345 smi_msg->user_data = recv_msg;
1347 /* It's a command, so get a sequence for it. */
1349 spin_lock_irqsave(&(intf->seq_lock), flags);
1351 spin_lock(&intf->counter_lock);
1352 intf->sent_ipmb_commands++;
1353 spin_unlock(&intf->counter_lock);
1355 /* Create a sequence number with a 1 second
1356 timeout and 4 retries. */
1357 rv = intf_next_seq(intf,
1365 /* We have used up all the sequence numbers,
1366 probably, so abort. */
1367 spin_unlock_irqrestore(&(intf->seq_lock),
1372 /* Store the sequence number in the message,
1373 so that when the send message response
1374 comes back we can start the timer. */
1375 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1376 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1377 ipmb_seq, broadcast,
1378 source_address, source_lun);
1380 /* Copy the message into the recv message data, so we
1381 can retransmit it later if necessary. */
1382 memcpy(recv_msg->msg_data, smi_msg->data,
1383 smi_msg->data_size);
1384 recv_msg->msg.data = recv_msg->msg_data;
1385 recv_msg->msg.data_len = smi_msg->data_size;
1387 /* We don't unlock until here, because we need
1388 to copy the completed message into the
1389 recv_msg before we release the lock.
1390 Otherwise, race conditions may bite us. I
1391 know that's pretty paranoid, but I prefer
1393 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1395 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1396 struct ipmi_lan_addr *lan_addr;
1397 unsigned char ipmb_seq;
1400 if (addr->channel >= IPMI_MAX_CHANNELS) {
1401 spin_lock_irqsave(&intf->counter_lock, flags);
1402 intf->sent_invalid_commands++;
1403 spin_unlock_irqrestore(&intf->counter_lock, flags);
1408 if ((intf->channels[addr->channel].medium
1409 != IPMI_CHANNEL_MEDIUM_8023LAN)
1410 && (intf->channels[addr->channel].medium
1411 != IPMI_CHANNEL_MEDIUM_ASYNC))
1413 spin_lock_irqsave(&intf->counter_lock, flags);
1414 intf->sent_invalid_commands++;
1415 spin_unlock_irqrestore(&intf->counter_lock, flags);
1422 /* Default to 1 second retries. */
1423 if (retry_time_ms == 0)
1424 retry_time_ms = 1000;
1426 /* 11 for the header and 1 for the checksum. */
1427 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1428 spin_lock_irqsave(&intf->counter_lock, flags);
1429 intf->sent_invalid_commands++;
1430 spin_unlock_irqrestore(&intf->counter_lock, flags);
1435 lan_addr = (struct ipmi_lan_addr *) addr;
1436 if (lan_addr->lun > 3) {
1437 spin_lock_irqsave(&intf->counter_lock, flags);
1438 intf->sent_invalid_commands++;
1439 spin_unlock_irqrestore(&intf->counter_lock, flags);
1444 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1446 if (recv_msg->msg.netfn & 0x1) {
1447 /* It's a response, so use the user's sequence
1449 spin_lock_irqsave(&intf->counter_lock, flags);
1450 intf->sent_lan_responses++;
1451 spin_unlock_irqrestore(&intf->counter_lock, flags);
1452 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1455 /* Save the receive message so we can use it
1456 to deliver the response. */
1457 smi_msg->user_data = recv_msg;
1459 /* It's a command, so get a sequence for it. */
1461 spin_lock_irqsave(&(intf->seq_lock), flags);
1463 spin_lock(&intf->counter_lock);
1464 intf->sent_lan_commands++;
1465 spin_unlock(&intf->counter_lock);
1467 /* Create a sequence number with a 1 second
1468 timeout and 4 retries. */
1469 rv = intf_next_seq(intf,
1477 /* We have used up all the sequence numbers,
1478 probably, so abort. */
1479 spin_unlock_irqrestore(&(intf->seq_lock),
1484 /* Store the sequence number in the message,
1485 so that when the send message response
1486 comes back we can start the timer. */
1487 format_lan_msg(smi_msg, msg, lan_addr,
1488 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1489 ipmb_seq, source_lun);
1491 /* Copy the message into the recv message data, so we
1492 can retransmit it later if necessary. */
1493 memcpy(recv_msg->msg_data, smi_msg->data,
1494 smi_msg->data_size);
1495 recv_msg->msg.data = recv_msg->msg_data;
1496 recv_msg->msg.data_len = smi_msg->data_size;
1498 /* We don't unlock until here, because we need
1499 to copy the completed message into the
1500 recv_msg before we release the lock.
1501 Otherwise, race conditions may bite us. I
1502 know that's pretty paranoid, but I prefer
1504 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1507 /* Unknown address type. */
1508 spin_lock_irqsave(&intf->counter_lock, flags);
1509 intf->sent_invalid_commands++;
1510 spin_unlock_irqrestore(&intf->counter_lock, flags);
1518 for (m = 0; m < smi_msg->data_size; m++)
1519 printk(" %2.2x", smi_msg->data[m]);
1523 intf->handlers->sender(intf->send_info, smi_msg, priority);
1528 ipmi_free_smi_msg(smi_msg);
1529 ipmi_free_recv_msg(recv_msg);
1533 static int check_addr(ipmi_smi_t intf,
1534 struct ipmi_addr *addr,
1535 unsigned char *saddr,
1538 if (addr->channel >= IPMI_MAX_CHANNELS)
1540 *lun = intf->channels[addr->channel].lun;
1541 *saddr = intf->channels[addr->channel].address;
1545 int ipmi_request_settime(ipmi_user_t user,
1546 struct ipmi_addr *addr,
1548 struct kernel_ipmi_msg *msg,
1549 void *user_msg_data,
1552 unsigned int retry_time_ms)
1554 unsigned char saddr, lun;
1559 rv = check_addr(user->intf, addr, &saddr, &lun);
1562 return i_ipmi_request(user,
1576 int ipmi_request_supply_msgs(ipmi_user_t user,
1577 struct ipmi_addr *addr,
1579 struct kernel_ipmi_msg *msg,
1580 void *user_msg_data,
1582 struct ipmi_recv_msg *supplied_recv,
1585 unsigned char saddr, lun;
1590 rv = check_addr(user->intf, addr, &saddr, &lun);
1593 return i_ipmi_request(user,
1607 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1608 int count, int *eof, void *data)
1610 char *out = (char *) page;
1611 ipmi_smi_t intf = data;
1615 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1616 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1617 out[rv-1] = '\n'; /* Replace the final space with a newline */
1623 static int version_file_read_proc(char *page, char **start, off_t off,
1624 int count, int *eof, void *data)
1626 char *out = (char *) page;
1627 ipmi_smi_t intf = data;
1629 return sprintf(out, "%d.%d\n",
1630 ipmi_version_major(&intf->bmc->id),
1631 ipmi_version_minor(&intf->bmc->id));
1634 static int stat_file_read_proc(char *page, char **start, off_t off,
1635 int count, int *eof, void *data)
1637 char *out = (char *) page;
1638 ipmi_smi_t intf = data;
1640 out += sprintf(out, "sent_invalid_commands: %d\n",
1641 intf->sent_invalid_commands);
1642 out += sprintf(out, "sent_local_commands: %d\n",
1643 intf->sent_local_commands);
1644 out += sprintf(out, "handled_local_responses: %d\n",
1645 intf->handled_local_responses);
1646 out += sprintf(out, "unhandled_local_responses: %d\n",
1647 intf->unhandled_local_responses);
1648 out += sprintf(out, "sent_ipmb_commands: %d\n",
1649 intf->sent_ipmb_commands);
1650 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1651 intf->sent_ipmb_command_errs);
1652 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1653 intf->retransmitted_ipmb_commands);
1654 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1655 intf->timed_out_ipmb_commands);
1656 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1657 intf->timed_out_ipmb_broadcasts);
1658 out += sprintf(out, "sent_ipmb_responses: %d\n",
1659 intf->sent_ipmb_responses);
1660 out += sprintf(out, "handled_ipmb_responses: %d\n",
1661 intf->handled_ipmb_responses);
1662 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1663 intf->invalid_ipmb_responses);
1664 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1665 intf->unhandled_ipmb_responses);
1666 out += sprintf(out, "sent_lan_commands: %d\n",
1667 intf->sent_lan_commands);
1668 out += sprintf(out, "sent_lan_command_errs: %d\n",
1669 intf->sent_lan_command_errs);
1670 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1671 intf->retransmitted_lan_commands);
1672 out += sprintf(out, "timed_out_lan_commands: %d\n",
1673 intf->timed_out_lan_commands);
1674 out += sprintf(out, "sent_lan_responses: %d\n",
1675 intf->sent_lan_responses);
1676 out += sprintf(out, "handled_lan_responses: %d\n",
1677 intf->handled_lan_responses);
1678 out += sprintf(out, "invalid_lan_responses: %d\n",
1679 intf->invalid_lan_responses);
1680 out += sprintf(out, "unhandled_lan_responses: %d\n",
1681 intf->unhandled_lan_responses);
1682 out += sprintf(out, "handled_commands: %d\n",
1683 intf->handled_commands);
1684 out += sprintf(out, "invalid_commands: %d\n",
1685 intf->invalid_commands);
1686 out += sprintf(out, "unhandled_commands: %d\n",
1687 intf->unhandled_commands);
1688 out += sprintf(out, "invalid_events: %d\n",
1689 intf->invalid_events);
1690 out += sprintf(out, "events: %d\n",
1693 return (out - ((char *) page));
1696 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1697 read_proc_t *read_proc, write_proc_t *write_proc,
1698 void *data, struct module *owner)
1701 #ifdef CONFIG_PROC_FS
1702 struct proc_dir_entry *file;
1703 struct ipmi_proc_entry *entry;
1705 /* Create a list element. */
1706 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1709 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1714 strcpy(entry->name, name);
1716 file = create_proc_entry(name, 0, smi->proc_dir);
1724 file->read_proc = read_proc;
1725 file->write_proc = write_proc;
1726 file->owner = owner;
1728 spin_lock(&smi->proc_entry_lock);
1729 /* Stick it on the list. */
1730 entry->next = smi->proc_entries;
1731 smi->proc_entries = entry;
1732 spin_unlock(&smi->proc_entry_lock);
1734 #endif /* CONFIG_PROC_FS */
1739 static int add_proc_entries(ipmi_smi_t smi, int num)
1743 #ifdef CONFIG_PROC_FS
1744 sprintf(smi->proc_dir_name, "%d", num);
1745 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1749 smi->proc_dir->owner = THIS_MODULE;
1753 rv = ipmi_smi_add_proc_entry(smi, "stats",
1754 stat_file_read_proc, NULL,
1758 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1759 ipmb_file_read_proc, NULL,
1763 rv = ipmi_smi_add_proc_entry(smi, "version",
1764 version_file_read_proc, NULL,
1766 #endif /* CONFIG_PROC_FS */
1771 static void remove_proc_entries(ipmi_smi_t smi)
1773 #ifdef CONFIG_PROC_FS
1774 struct ipmi_proc_entry *entry;
1776 spin_lock(&smi->proc_entry_lock);
1777 while (smi->proc_entries) {
1778 entry = smi->proc_entries;
1779 smi->proc_entries = entry->next;
1781 remove_proc_entry(entry->name, smi->proc_dir);
1785 spin_unlock(&smi->proc_entry_lock);
1786 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1787 #endif /* CONFIG_PROC_FS */
1790 static int __find_bmc_guid(struct device *dev, void *data)
1792 unsigned char *id = data;
1793 struct bmc_device *bmc = dev_get_drvdata(dev);
1794 return memcmp(bmc->guid, id, 16) == 0;
1797 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1798 unsigned char *guid)
1802 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1804 return dev_get_drvdata(dev);
/* Match key used to identify a BMC by its product and device IDs
 * when no GUID is available. */
struct prod_dev_id {
        unsigned int  product_id;
        unsigned char device_id;
};
1814 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1816 struct prod_dev_id *id = data;
1817 struct bmc_device *bmc = dev_get_drvdata(dev);
1819 return (bmc->id.product_id == id->product_id
1820 && bmc->id.product_id == id->product_id
1821 && bmc->id.device_id == id->device_id);
1824 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1825 struct device_driver *drv,
1826 unsigned char product_id, unsigned char device_id)
1828 struct prod_dev_id id = {
1829 .product_id = product_id,
1830 .device_id = device_id,
1834 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1836 return dev_get_drvdata(dev);
1841 static ssize_t device_id_show(struct device *dev,
1842 struct device_attribute *attr,
1845 struct bmc_device *bmc = dev_get_drvdata(dev);
1847 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1850 static ssize_t provides_dev_sdrs_show(struct device *dev,
1851 struct device_attribute *attr,
1854 struct bmc_device *bmc = dev_get_drvdata(dev);
1856 return snprintf(buf, 10, "%u\n",
1857 bmc->id.device_revision && 0x80 >> 7);
1860 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1863 struct bmc_device *bmc = dev_get_drvdata(dev);
1865 return snprintf(buf, 20, "%u\n",
1866 bmc->id.device_revision && 0x0F);
1869 static ssize_t firmware_rev_show(struct device *dev,
1870 struct device_attribute *attr,
1873 struct bmc_device *bmc = dev_get_drvdata(dev);
1875 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1876 bmc->id.firmware_revision_2);
1879 static ssize_t ipmi_version_show(struct device *dev,
1880 struct device_attribute *attr,
1883 struct bmc_device *bmc = dev_get_drvdata(dev);
1885 return snprintf(buf, 20, "%u.%u\n",
1886 ipmi_version_major(&bmc->id),
1887 ipmi_version_minor(&bmc->id));
1890 static ssize_t add_dev_support_show(struct device *dev,
1891 struct device_attribute *attr,
1894 struct bmc_device *bmc = dev_get_drvdata(dev);
1896 return snprintf(buf, 10, "0x%02x\n",
1897 bmc->id.additional_device_support);
1900 static ssize_t manufacturer_id_show(struct device *dev,
1901 struct device_attribute *attr,
1904 struct bmc_device *bmc = dev_get_drvdata(dev);
1906 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
1909 static ssize_t product_id_show(struct device *dev,
1910 struct device_attribute *attr,
1913 struct bmc_device *bmc = dev_get_drvdata(dev);
1915 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
1918 static ssize_t aux_firmware_rev_show(struct device *dev,
1919 struct device_attribute *attr,
1922 struct bmc_device *bmc = dev_get_drvdata(dev);
1924 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1925 bmc->id.aux_firmware_revision[3],
1926 bmc->id.aux_firmware_revision[2],
1927 bmc->id.aux_firmware_revision[1],
1928 bmc->id.aux_firmware_revision[0]);
1931 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1934 struct bmc_device *bmc = dev_get_drvdata(dev);
1936 return snprintf(buf, 100, "%Lx%Lx\n",
1937 (long long) bmc->guid[0],
1938 (long long) bmc->guid[8]);
1941 static void remove_files(struct bmc_device *bmc)
1943 device_remove_file(&bmc->dev->dev,
1944 &bmc->device_id_attr);
1945 device_remove_file(&bmc->dev->dev,
1946 &bmc->provides_dev_sdrs_attr);
1947 device_remove_file(&bmc->dev->dev,
1948 &bmc->revision_attr);
1949 device_remove_file(&bmc->dev->dev,
1950 &bmc->firmware_rev_attr);
1951 device_remove_file(&bmc->dev->dev,
1952 &bmc->version_attr);
1953 device_remove_file(&bmc->dev->dev,
1954 &bmc->add_dev_support_attr);
1955 device_remove_file(&bmc->dev->dev,
1956 &bmc->manufacturer_id_attr);
1957 device_remove_file(&bmc->dev->dev,
1958 &bmc->product_id_attr);
1960 if (bmc->id.aux_firmware_revision_set)
1961 device_remove_file(&bmc->dev->dev,
1962 &bmc->aux_firmware_rev_attr);
1964 device_remove_file(&bmc->dev->dev,
1969 cleanup_bmc_device(struct kref *ref)
1971 struct bmc_device *bmc;
1973 bmc = container_of(ref, struct bmc_device, refcount);
1976 platform_device_unregister(bmc->dev);
1980 static void ipmi_bmc_unregister(ipmi_smi_t intf)
1982 struct bmc_device *bmc = intf->bmc;
1984 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1985 if (intf->my_dev_name) {
1986 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1987 kfree(intf->my_dev_name);
1988 intf->my_dev_name = NULL;
1991 mutex_lock(&ipmidriver_mutex);
1992 kref_put(&bmc->refcount, cleanup_bmc_device);
1993 mutex_unlock(&ipmidriver_mutex);
1996 static int create_files(struct bmc_device *bmc)
2000 err = device_create_file(&bmc->dev->dev,
2001 &bmc->device_id_attr);
2003 err = device_create_file(&bmc->dev->dev,
2004 &bmc->provides_dev_sdrs_attr);
2005 if (err) goto out_devid;
2006 err = device_create_file(&bmc->dev->dev,
2007 &bmc->revision_attr);
2008 if (err) goto out_sdrs;
2009 err = device_create_file(&bmc->dev->dev,
2010 &bmc->firmware_rev_attr);
2011 if (err) goto out_rev;
2012 err = device_create_file(&bmc->dev->dev,
2013 &bmc->version_attr);
2014 if (err) goto out_firm;
2015 err = device_create_file(&bmc->dev->dev,
2016 &bmc->add_dev_support_attr);
2017 if (err) goto out_version;
2018 err = device_create_file(&bmc->dev->dev,
2019 &bmc->manufacturer_id_attr);
2020 if (err) goto out_add_dev;
2021 err = device_create_file(&bmc->dev->dev,
2022 &bmc->product_id_attr);
2023 if (err) goto out_manu;
2024 if (bmc->id.aux_firmware_revision_set) {
2025 err = device_create_file(&bmc->dev->dev,
2026 &bmc->aux_firmware_rev_attr);
2027 if (err) goto out_prod_id;
2029 if (bmc->guid_set) {
2030 err = device_create_file(&bmc->dev->dev,
2032 if (err) goto out_aux_firm;
2038 if (bmc->id.aux_firmware_revision_set)
2039 device_remove_file(&bmc->dev->dev,
2040 &bmc->aux_firmware_rev_attr);
2042 device_remove_file(&bmc->dev->dev,
2043 &bmc->product_id_attr);
2045 device_remove_file(&bmc->dev->dev,
2046 &bmc->manufacturer_id_attr);
2048 device_remove_file(&bmc->dev->dev,
2049 &bmc->add_dev_support_attr);
2051 device_remove_file(&bmc->dev->dev,
2052 &bmc->version_attr);
2054 device_remove_file(&bmc->dev->dev,
2055 &bmc->firmware_rev_attr);
2057 device_remove_file(&bmc->dev->dev,
2058 &bmc->revision_attr);
2060 device_remove_file(&bmc->dev->dev,
2061 &bmc->provides_dev_sdrs_attr);
2063 device_remove_file(&bmc->dev->dev,
2064 &bmc->device_id_attr);
/*
 * NOTE(review): this listing is a lossy extraction -- the original
 * kernel line numbers are embedded at the start of each line and many
 * lines (braces, conditionals, printk arguments, error labels) have
 * been dropped, so the code below is not compilable as shown.  The
 * logic is too mutilated to reconstruct safely; comments only.
 *
 * ipmi_bmc_register(): bind this interface to a bmc_device.  If a BMC
 * with the same GUID (or, lacking a GUID, the same product/device ID)
 * is already registered, share it and take a reference; otherwise
 * allocate and register a new "ipmi_bmc" platform device, set up its
 * sysfs attributes via create_files(), then create the "bmc" symlink
 * on the interface device and a reverse "ipmiN" symlink on the BMC
 * device.  Errors after device registration unwind through
 * ipmi_bmc_unregister().  ipmidriver_mutex serializes the lookup and
 * registration.
 */
2069 static int ipmi_bmc_register(ipmi_smi_t intf)
2072 struct bmc_device *bmc = intf->bmc;
2073 struct bmc_device *old_bmc;
2077 mutex_lock(&ipmidriver_mutex);
2080 * Try to find if there is an bmc_device struct
2081 * representing the interfaced BMC already
2084 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2086 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2091 * If there is already an bmc_device, free the new one,
2092 * otherwise register the new BMC device
2096 intf->bmc = old_bmc;
2099 kref_get(&bmc->refcount);
2100 mutex_unlock(&ipmidriver_mutex);
2103 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2104 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2105 bmc->id.manufacturer_id,
2109 bmc->dev = platform_device_alloc("ipmi_bmc",
2114 " Unable to allocate platform device\n");
2117 bmc->dev->dev.driver = &ipmidriver;
2118 dev_set_drvdata(&bmc->dev->dev, bmc);
2119 kref_init(&bmc->refcount);
2121 rv = platform_device_register(bmc->dev);
2122 mutex_unlock(&ipmidriver_mutex);
2126 " Unable to register bmc device: %d\n",
2128 /* Don't go to out_err, you can only do that if
2129 the device is registered already. */
2133 bmc->device_id_attr.attr.name = "device_id";
2134 bmc->device_id_attr.attr.owner = THIS_MODULE;
2135 bmc->device_id_attr.attr.mode = S_IRUGO;
2136 bmc->device_id_attr.show = device_id_show;
2138 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2139 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2140 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2141 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2143 bmc->revision_attr.attr.name = "revision";
2144 bmc->revision_attr.attr.owner = THIS_MODULE;
2145 bmc->revision_attr.attr.mode = S_IRUGO;
2146 bmc->revision_attr.show = revision_show;
2148 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2149 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2150 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2151 bmc->firmware_rev_attr.show = firmware_rev_show;
2153 bmc->version_attr.attr.name = "ipmi_version";
2154 bmc->version_attr.attr.owner = THIS_MODULE;
2155 bmc->version_attr.attr.mode = S_IRUGO;
2156 bmc->version_attr.show = ipmi_version_show;
2158 bmc->add_dev_support_attr.attr.name
2159 = "additional_device_support";
2160 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2161 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2162 bmc->add_dev_support_attr.show = add_dev_support_show;
2164 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2165 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2166 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2167 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2169 bmc->product_id_attr.attr.name = "product_id";
2170 bmc->product_id_attr.attr.owner = THIS_MODULE;
2171 bmc->product_id_attr.attr.mode = S_IRUGO;
2172 bmc->product_id_attr.show = product_id_show;
2174 bmc->guid_attr.attr.name = "guid";
2175 bmc->guid_attr.attr.owner = THIS_MODULE;
2176 bmc->guid_attr.attr.mode = S_IRUGO;
2177 bmc->guid_attr.show = guid_show;
2179 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2180 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2181 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2182 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2184 rv = create_files(bmc);
2186 mutex_lock(&ipmidriver_mutex);
2187 platform_device_unregister(bmc->dev);
2188 mutex_unlock(&ipmidriver_mutex);
2194 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2195 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2196 bmc->id.manufacturer_id,
2202 * create symlink from system interface device to bmc device
2205 rv = sysfs_create_link(&intf->si_dev->kobj,
2206 &bmc->dev->dev.kobj, "bmc");
2209 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2214 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2215 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2216 if (!intf->my_dev_name) {
2219 "ipmi_msghandler: allocate link from BMC: %d\n",
2223 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2225 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2228 kfree(intf->my_dev_name);
2229 intf->my_dev_name = NULL;
2232 " Unable to create symlink to bmc: %d\n",
2240 ipmi_bmc_unregister(intf);
2245 send_guid_cmd(ipmi_smi_t intf, int chan)
2247 struct kernel_ipmi_msg msg;
2248 struct ipmi_system_interface_addr si;
2250 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2251 si.channel = IPMI_BMC_CHANNEL;
2254 msg.netfn = IPMI_NETFN_APP_REQUEST;
2255 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2258 return i_ipmi_request(NULL,
2260 (struct ipmi_addr *) &si,
2267 intf->channels[0].address,
2268 intf->channels[0].lun,
2273 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2275 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2276 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2277 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2281 if (msg->msg.data[0] != 0) {
2282 /* Error from getting the GUID, the BMC doesn't have one. */
2283 intf->bmc->guid_set = 0;
2287 if (msg->msg.data_len < 17) {
2288 intf->bmc->guid_set = 0;
2289 printk(KERN_WARNING PFX
2290 "guid_handler: The GUID response from the BMC was too"
2291 " short, it was %d but should have been 17. Assuming"
2292 " GUID is not available.\n",
2297 memcpy(intf->bmc->guid, msg->msg.data, 16);
2298 intf->bmc->guid_set = 1;
2300 wake_up(&intf->waitq);
2304 get_guid(ipmi_smi_t intf)
2308 intf->bmc->guid_set = 0x2;
2309 intf->null_user_handler = guid_handler;
2310 rv = send_guid_cmd(intf, 0);
2312 /* Send failed, no GUID available. */
2313 intf->bmc->guid_set = 0;
2314 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2315 intf->null_user_handler = NULL;
2319 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2321 struct kernel_ipmi_msg msg;
2322 unsigned char data[1];
2323 struct ipmi_system_interface_addr si;
2325 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2326 si.channel = IPMI_BMC_CHANNEL;
2329 msg.netfn = IPMI_NETFN_APP_REQUEST;
2330 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2334 return i_ipmi_request(NULL,
2336 (struct ipmi_addr *) &si,
2343 intf->channels[0].address,
2344 intf->channels[0].lun,
2349 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2354 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2355 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2356 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2358 /* It's the one we want */
2359 if (msg->msg.data[0] != 0) {
2360 /* Got an error from the channel, just go on. */
2362 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2363 /* If the MC does not support this
2364 command, that is legal. We just
2365 assume it has one IPMB at channel
2367 intf->channels[0].medium
2368 = IPMI_CHANNEL_MEDIUM_IPMB;
2369 intf->channels[0].protocol
2370 = IPMI_CHANNEL_PROTOCOL_IPMB;
2373 intf->curr_channel = IPMI_MAX_CHANNELS;
2374 wake_up(&intf->waitq);
2379 if (msg->msg.data_len < 4) {
2380 /* Message not big enough, just go on. */
2383 chan = intf->curr_channel;
2384 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2385 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2388 intf->curr_channel++;
2389 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2390 wake_up(&intf->waitq);
2392 rv = send_channel_info_cmd(intf, intf->curr_channel);
2395 /* Got an error somehow, just give up. */
2396 intf->curr_channel = IPMI_MAX_CHANNELS;
2397 wake_up(&intf->waitq);
2399 printk(KERN_WARNING PFX
2400 "Error sending channel information: %d\n",
/*
 * NOTE(review): this listing is a lossy extraction -- original kernel
 * line numbers prefix each line and many lines (locals, error
 * branches, closing braces, the out/error label block) were dropped,
 * so the code below is not compilable as shown.  The control flow is
 * too mutilated to reconstruct safely; comments only.
 *
 * ipmi_register_smi(): register a new system-management interface.
 * Ensures the message handler is initialized, allocates and zeroes
 * the intf and its bmc_device, seeds per-channel defaults
 * (IPMI_BMC_SLAVE_ADDR, LUN 2, optional slave_addr override for
 * channel 0), initializes all locks/lists/waitqueues, reserves a slot
 * in ipmi_interfaces[] under interfaces_lock, starts the lower-layer
 * processing, then for IPMI >= 1.5 scans channels via
 * send_channel_info_cmd()/channel_handler (waiting on intf->waitq),
 * otherwise assumes one IPMB channel at zero.  Finally adds proc
 * entries, registers the BMC, and on failure releases the reserved
 * slot and drops the intf reference; on success publishes the intf
 * and notifies SMI watchers.
 */
2408 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2410 struct ipmi_device_id *device_id,
2411 struct device *si_dev,
2412 unsigned char slave_addr)
2417 unsigned long flags;
2421 version_major = ipmi_version_major(device_id);
2422 version_minor = ipmi_version_minor(device_id);
2424 /* Make sure the driver is actually initialized, this handles
2425 problems with initialization order. */
2427 rv = ipmi_init_msghandler();
2430 /* The init code doesn't return an error if it was turned
2431 off, but it won't initialize. Check that. */
2436 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2439 memset(intf, 0, sizeof(*intf));
2440 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2445 intf->intf_num = -1;
2446 kref_init(&intf->refcount);
2447 intf->bmc->id = *device_id;
2448 intf->si_dev = si_dev;
2449 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2450 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2451 intf->channels[j].lun = 2;
2453 if (slave_addr != 0)
2454 intf->channels[0].address = slave_addr;
2455 INIT_LIST_HEAD(&intf->users);
2456 intf->handlers = handlers;
2457 intf->send_info = send_info;
2458 spin_lock_init(&intf->seq_lock);
2459 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2460 intf->seq_table[j].inuse = 0;
2461 intf->seq_table[j].seqid = 0;
2464 #ifdef CONFIG_PROC_FS
2465 spin_lock_init(&intf->proc_entry_lock);
2467 spin_lock_init(&intf->waiting_msgs_lock);
2468 INIT_LIST_HEAD(&intf->waiting_msgs);
2469 spin_lock_init(&intf->events_lock);
2470 INIT_LIST_HEAD(&intf->waiting_events);
2471 intf->waiting_events_count = 0;
2472 mutex_init(&intf->cmd_rcvrs_mutex);
2473 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2474 init_waitqueue_head(&intf->waitq);
2476 spin_lock_init(&intf->counter_lock);
2477 intf->proc_dir = NULL;
2480 spin_lock_irqsave(&interfaces_lock, flags);
2481 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2482 if (ipmi_interfaces[i] == NULL) {
2484 /* Reserve the entry till we are done. */
2485 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2490 spin_unlock_irqrestore(&interfaces_lock, flags);
2494 rv = handlers->start_processing(send_info, intf);
2500 if ((version_major > 1)
2501 || ((version_major == 1) && (version_minor >= 5)))
2503 /* Start scanning the channels to see what is
2505 intf->null_user_handler = channel_handler;
2506 intf->curr_channel = 0;
2507 rv = send_channel_info_cmd(intf, 0);
2511 /* Wait for the channel info to be read. */
2512 wait_event(intf->waitq,
2513 intf->curr_channel >= IPMI_MAX_CHANNELS);
2514 intf->null_user_handler = NULL;
2516 /* Assume a single IPMB channel at zero. */
2517 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2518 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2522 rv = add_proc_entries(intf, i);
2524 rv = ipmi_bmc_register(intf);
2529 remove_proc_entries(intf);
2530 kref_put(&intf->refcount, intf_free);
2531 if (i < MAX_IPMI_INTERFACES) {
2532 spin_lock_irqsave(&interfaces_lock, flags);
2533 ipmi_interfaces[i] = NULL;
2534 spin_unlock_irqrestore(&interfaces_lock, flags);
2537 spin_lock_irqsave(&interfaces_lock, flags);
2538 ipmi_interfaces[i] = intf;
2539 spin_unlock_irqrestore(&interfaces_lock, flags);
2540 call_smi_watchers(i, intf->si_dev);
2546 int ipmi_unregister_smi(ipmi_smi_t intf)
2549 struct ipmi_smi_watcher *w;
2550 unsigned long flags;
2552 ipmi_bmc_unregister(intf);
2554 spin_lock_irqsave(&interfaces_lock, flags);
2555 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2556 if (ipmi_interfaces[i] == intf) {
2557 /* Set the interface number reserved until we
2559 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2560 intf->intf_num = -1;
2564 spin_unlock_irqrestore(&interfaces_lock,flags);
2566 if (i == MAX_IPMI_INTERFACES)
2569 remove_proc_entries(intf);
2571 /* Call all the watcher interfaces to tell them that
2572 an interface is gone. */
2573 down_read(&smi_watchers_sem);
2574 list_for_each_entry(w, &smi_watchers, link)
2576 up_read(&smi_watchers_sem);
2578 /* Allow the entry to be reused now. */
2579 spin_lock_irqsave(&interfaces_lock, flags);
2580 ipmi_interfaces[i] = NULL;
2581 spin_unlock_irqrestore(&interfaces_lock,flags);
2583 kref_put(&intf->refcount, intf_free);
2587 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2588 struct ipmi_smi_msg *msg)
2590 struct ipmi_ipmb_addr ipmb_addr;
2591 struct ipmi_recv_msg *recv_msg;
2592 unsigned long flags;
2595 /* This is 11, not 10, because the response must contain a
2596 * completion code. */
2597 if (msg->rsp_size < 11) {
2598 /* Message not big enough, just ignore it. */
2599 spin_lock_irqsave(&intf->counter_lock, flags);
2600 intf->invalid_ipmb_responses++;
2601 spin_unlock_irqrestore(&intf->counter_lock, flags);
2605 if (msg->rsp[2] != 0) {
2606 /* An error getting the response, just ignore it. */
2610 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2611 ipmb_addr.slave_addr = msg->rsp[6];
2612 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2613 ipmb_addr.lun = msg->rsp[7] & 3;
2615 /* It's a response from a remote entity. Look up the sequence
2616 number and handle the response. */
2617 if (intf_find_seq(intf,
2621 (msg->rsp[4] >> 2) & (~1),
2622 (struct ipmi_addr *) &(ipmb_addr),
2625 /* We were unable to find the sequence number,
2626 so just nuke the message. */
2627 spin_lock_irqsave(&intf->counter_lock, flags);
2628 intf->unhandled_ipmb_responses++;
2629 spin_unlock_irqrestore(&intf->counter_lock, flags);
2633 memcpy(recv_msg->msg_data,
2636 /* THe other fields matched, so no need to set them, except
2637 for netfn, which needs to be the response that was
2638 returned, not the request value. */
2639 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2640 recv_msg->msg.data = recv_msg->msg_data;
2641 recv_msg->msg.data_len = msg->rsp_size - 10;
2642 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2643 spin_lock_irqsave(&intf->counter_lock, flags);
2644 intf->handled_ipmb_responses++;
2645 spin_unlock_irqrestore(&intf->counter_lock, flags);
2646 deliver_response(recv_msg);
2651 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2652 struct ipmi_smi_msg *msg)
2654 struct cmd_rcvr *rcvr;
2656 unsigned char netfn;
2659 ipmi_user_t user = NULL;
2660 struct ipmi_ipmb_addr *ipmb_addr;
2661 struct ipmi_recv_msg *recv_msg;
2662 unsigned long flags;
2664 if (msg->rsp_size < 10) {
2665 /* Message not big enough, just ignore it. */
2666 spin_lock_irqsave(&intf->counter_lock, flags);
2667 intf->invalid_commands++;
2668 spin_unlock_irqrestore(&intf->counter_lock, flags);
2672 if (msg->rsp[2] != 0) {
2673 /* An error getting the response, just ignore it. */
2677 netfn = msg->rsp[4] >> 2;
2679 chan = msg->rsp[3] & 0xf;
2682 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2685 kref_get(&user->refcount);
2691 /* We didn't find a user, deliver an error response. */
2692 spin_lock_irqsave(&intf->counter_lock, flags);
2693 intf->unhandled_commands++;
2694 spin_unlock_irqrestore(&intf->counter_lock, flags);
2696 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2697 msg->data[1] = IPMI_SEND_MSG_CMD;
2698 msg->data[2] = msg->rsp[3];
2699 msg->data[3] = msg->rsp[6];
2700 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2701 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2702 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2704 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2705 msg->data[8] = msg->rsp[8]; /* cmd */
2706 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2707 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2708 msg->data_size = 11;
2713 printk("Invalid command:");
2714 for (m = 0; m < msg->data_size; m++)
2715 printk(" %2.2x", msg->data[m]);
2719 intf->handlers->sender(intf->send_info, msg, 0);
2721 rv = -1; /* We used the message, so return the value that
2722 causes it to not be freed or queued. */
2724 /* Deliver the message to the user. */
2725 spin_lock_irqsave(&intf->counter_lock, flags);
2726 intf->handled_commands++;
2727 spin_unlock_irqrestore(&intf->counter_lock, flags);
2729 recv_msg = ipmi_alloc_recv_msg();
2731 /* We couldn't allocate memory for the
2732 message, so requeue it for handling
2735 kref_put(&user->refcount, free_user);
2737 /* Extract the source address from the data. */
2738 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2739 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2740 ipmb_addr->slave_addr = msg->rsp[6];
2741 ipmb_addr->lun = msg->rsp[7] & 3;
2742 ipmb_addr->channel = msg->rsp[3] & 0xf;
2744 /* Extract the rest of the message information
2745 from the IPMB header.*/
2746 recv_msg->user = user;
2747 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2748 recv_msg->msgid = msg->rsp[7] >> 2;
2749 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2750 recv_msg->msg.cmd = msg->rsp[8];
2751 recv_msg->msg.data = recv_msg->msg_data;
2753 /* We chop off 10, not 9 bytes because the checksum
2754 at the end also needs to be removed. */
2755 recv_msg->msg.data_len = msg->rsp_size - 10;
2756 memcpy(recv_msg->msg_data,
2758 msg->rsp_size - 10);
2759 deliver_response(recv_msg);
/*
 * Handle a response to one of our commands that arrived back over a
 * LAN channel (inside a Get Message response).  The LAN source address
 * is rebuilt from the wrapper, the pending request is located by
 * sequence number via intf_find_seq(), and the response is delivered
 * to the original requester.
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
2766 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2767 struct ipmi_smi_msg *msg)
2769 struct ipmi_lan_addr lan_addr;
2770 struct ipmi_recv_msg *recv_msg;
2771 unsigned long flags;
2774 /* This is 13, not 12, because the response must contain a
2775 * completion code. */
2776 if (msg->rsp_size < 13) {
2777 /* Message not big enough, just ignore it. */
2778 spin_lock_irqsave(&intf->counter_lock, flags);
2779 intf->invalid_lan_responses++;
2780 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Get Message itself failed (rsp[2] is its completion code). */
2784 if (msg->rsp[2] != 0) {
2785 /* An error getting the response, just ignore it. */
/* Reconstruct the remote LAN address from the wrapper bytes so it can
   be matched against the address stored with the pending sequence. */
2789 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2790 lan_addr.session_handle = msg->rsp[4];
2791 lan_addr.remote_SWID = msg->rsp[8];
2792 lan_addr.local_SWID = msg->rsp[5];
2793 lan_addr.channel = msg->rsp[3] & 0x0f;
2794 lan_addr.privilege = msg->rsp[3] >> 4;
2795 lan_addr.lun = msg->rsp[9] & 3;
2797 /* It's a response from a remote entity. Look up the sequence
2798 number and handle the response. */
2799 if (intf_find_seq(intf,
2803 (msg->rsp[6] >> 2) & (~1),
2804 (struct ipmi_addr *) &(lan_addr),
2807 /* We were unable to find the sequence number,
2808 so just nuke the message. */
2809 spin_lock_irqsave(&intf->counter_lock, flags);
2810 intf->unhandled_lan_responses++;
2811 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Copies rsp_size-11 bytes, but data_len below is rsp_size-12: the
   trailing checksum byte is copied yet excluded from the length. */
2815 memcpy(recv_msg->msg_data,
2817 msg->rsp_size - 11);
2818 /* The other fields matched, so no need to set them, except
2819 for netfn, which needs to be the response that was
2820 returned, not the request value. */
2821 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2822 recv_msg->msg.data = recv_msg->msg_data;
2823 recv_msg->msg.data_len = msg->rsp_size - 12;
2824 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2825 spin_lock_irqsave(&intf->counter_lock, flags);
2826 intf->handled_lan_responses++;
2827 spin_unlock_irqrestore(&intf->counter_lock, flags);
2828 deliver_response(recv_msg);
/*
 * Handle an incoming command that arrived over a LAN channel.  Like
 * the IPMB variant, but when no receiver is registered the message is
 * simply dropped (rv = 0) instead of generating an error response.
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
2833 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2834 struct ipmi_smi_msg *msg)
2836 struct cmd_rcvr *rcvr;
2838 unsigned char netfn;
2841 ipmi_user_t user = NULL;
2842 struct ipmi_lan_addr *lan_addr;
2843 struct ipmi_recv_msg *recv_msg;
2844 unsigned long flags;
2846 if (msg->rsp_size < 12) {
2847 /* Message not big enough, just ignore it. */
2848 spin_lock_irqsave(&intf->counter_lock, flags);
2849 intf->invalid_commands++;
2850 spin_unlock_irqrestore(&intf->counter_lock, flags);
2854 if (msg->rsp[2] != 0) {
2855 /* An error getting the response, just ignore it. */
2859 netfn = msg->rsp[6] >> 2;
2861 chan = msg->rsp[3] & 0xf;
/* Look up a registered receiver; on a hit, pin the owning user. */
2864 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
2867 kref_get(&user->refcount);
2873 /* We didn't find a user, just give up. */
2874 spin_lock_irqsave(&intf->counter_lock, flags);
2875 intf->unhandled_commands++;
2876 spin_unlock_irqrestore(&intf->counter_lock, flags);
2878 rv = 0; /* Don't do anything with these messages, just
2879 allow them to be freed. */
2881 /* Deliver the message to the user. */
2882 spin_lock_irqsave(&intf->counter_lock, flags);
2883 intf->handled_commands++;
2884 spin_unlock_irqrestore(&intf->counter_lock, flags);
2886 recv_msg = ipmi_alloc_recv_msg();
2888 /* We couldn't allocate memory for the
2889 message, so requeue it for handling
/* Allocation failed: drop the user reference taken above. */
2892 kref_put(&user->refcount, free_user);
2894 /* Extract the source address from the data. */
2895 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2896 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2897 lan_addr->session_handle = msg->rsp[4];
2898 lan_addr->remote_SWID = msg->rsp[8];
2899 lan_addr->local_SWID = msg->rsp[5];
2900 lan_addr->lun = msg->rsp[9] & 3;
2901 lan_addr->channel = msg->rsp[3] & 0xf;
2902 lan_addr->privilege = msg->rsp[3] >> 4;
2904 /* Extract the rest of the message information
2905 from the IPMB header.*/
2906 recv_msg->user = user;
2907 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2908 recv_msg->msgid = msg->rsp[9] >> 2;
2909 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2910 recv_msg->msg.cmd = msg->rsp[10];
2911 recv_msg->msg.data = recv_msg->msg_data;
2913 /* We chop off 12, not 11 bytes because the checksum
2914 at the end also needs to be removed. */
2915 recv_msg->msg.data_len = msg->rsp_size - 12;
2916 memcpy(recv_msg->msg_data,
2918 msg->rsp_size - 12);
2919 deliver_response(recv_msg);
/*
 * Fill a receive message in as an asynchronous event from the BMC.
 * The source address is set to the local system interface and the
 * event payload is copied, skipping the 3 header bytes
 * (netfn/lun, cmd, completion code) of the raw response.
 */
2926 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2927 struct ipmi_smi_msg *msg)
2929 struct ipmi_system_interface_addr *smi_addr;
2931 recv_msg->msgid = 0;
2932 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2933 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2934 smi_addr->channel = IPMI_BMC_CHANNEL;
2935 smi_addr->lun = msg->rsp[0] & 3;
2936 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2937 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2938 recv_msg->msg.cmd = msg->rsp[1];
2939 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2940 recv_msg->msg.data = recv_msg->msg_data;
2941 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle the response to a Read Event Message Buffer command: an
 * asynchronous event from the BMC.  A copy of the event is delivered
 * to every user that has gets_events set; if nobody is listening, the
 * event is parked on intf->waiting_events (bounded by
 * MAX_EVENTS_IN_QUEUE) for a future listener.
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
2944 static int handle_read_event_rsp(ipmi_smi_t intf,
2945 struct ipmi_smi_msg *msg)
2947 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2948 struct list_head msgs;
2951 int deliver_count = 0;
2952 unsigned long flags;
2954 if (msg->rsp_size < 19) {
2955 /* Message is too small to be an IPMB event. */
2956 spin_lock_irqsave(&intf->counter_lock, flags);
2957 intf->invalid_events++;
2958 spin_unlock_irqrestore(&intf->counter_lock, flags);
2962 if (msg->rsp[2] != 0) {
2963 /* An error getting the event, just ignore it. */
2967 INIT_LIST_HEAD(&msgs);
/* events_lock serializes event delivery/queueing for this interface. */
2969 spin_lock_irqsave(&intf->events_lock, flags);
2971 spin_lock(&intf->counter_lock);
2973 spin_unlock(&intf->counter_lock);
2975 /* Allocate and fill in one message for every user that is getting
2978 list_for_each_entry_rcu(user, &intf->users, link) {
2979 if (!user->gets_events)
2982 recv_msg = ipmi_alloc_recv_msg();
/* On allocation failure, free any copies already built for other
   users, then (per the comment below) requeue the event. */
2985 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2987 list_del(&recv_msg->link);
2988 ipmi_free_recv_msg(recv_msg);
2990 /* We couldn't allocate memory for the
2991 message, so requeue it for handling
2999 copy_event_into_recv_msg(recv_msg, msg);
3000 recv_msg->user = user;
3001 kref_get(&user->refcount);
3002 list_add_tail(&(recv_msg->link), &msgs);
3006 if (deliver_count) {
3007 /* Now deliver all the messages. */
3008 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3009 list_del(&recv_msg->link);
3010 deliver_response(recv_msg);
3012 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3013 /* No one to receive the message, put it in queue if there's
3014 not already too many things in the queue. */
3015 recv_msg = ipmi_alloc_recv_msg();
3017 /* We couldn't allocate memory for the
3018 message, so requeue it for handling
3024 copy_event_into_recv_msg(recv_msg, msg);
3025 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3026 intf->waiting_events_count++;
3028 /* There's too many things in the queue, discard this
3030 printk(KERN_WARNING PFX "Event queue full, discarding an"
3031 " incoming event\n");
3035 spin_unlock_irqrestore(&(intf->events_lock), flags);
3040 static int handle_bmc_rsp(ipmi_smi_t intf,
3041 struct ipmi_smi_msg *msg)
3043 struct ipmi_recv_msg *recv_msg;
3044 unsigned long flags;
3045 struct ipmi_user *user;
3047 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3048 if (recv_msg == NULL)
3050 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3051 "could be because of a malformed message, or\n"
3052 "because of a hardware error. Contact your\n"
3053 "hardware vender for assistance\n");
3057 user = recv_msg->user;
3058 /* Make sure the user still exists. */
3059 if (user && !user->valid) {
3060 /* The user for the message went away, so give up. */
3061 spin_lock_irqsave(&intf->counter_lock, flags);
3062 intf->unhandled_local_responses++;
3063 spin_unlock_irqrestore(&intf->counter_lock, flags);
3064 ipmi_free_recv_msg(recv_msg);
3066 struct ipmi_system_interface_addr *smi_addr;
3068 spin_lock_irqsave(&intf->counter_lock, flags);
3069 intf->handled_local_responses++;
3070 spin_unlock_irqrestore(&intf->counter_lock, flags);
3071 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3072 recv_msg->msgid = msg->msgid;
3073 smi_addr = ((struct ipmi_system_interface_addr *)
3075 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3076 smi_addr->channel = IPMI_BMC_CHANNEL;
3077 smi_addr->lun = msg->rsp[0] & 3;
3078 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3079 recv_msg->msg.cmd = msg->rsp[1];
3080 memcpy(recv_msg->msg_data,
3083 recv_msg->msg.data = recv_msg->msg_data;
3084 recv_msg->msg.data_len = msg->rsp_size - 2;
3085 deliver_response(recv_msg);
3091 /* Handle a new message. Return 1 if the message should be requeued,
3092 0 if the message should be freed, or -1 if the message should not
3093 be freed or requeued. */
/*
 * Central dispatch for a message coming up from the lower (SMI) layer.
 * First sanity-checks the response against the request it answers,
 * then routes it: Send Message response-to-response, Get Message
 * (IPMB or LAN by channel medium), Read Event Message Buffer, or a
 * plain local-BMC response.
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
3094 static int handle_new_recv_msg(ipmi_smi_t intf,
3095 struct ipmi_smi_msg *msg)
3103 for (m = 0; m < msg->rsp_size; m++)
3104 printk(" %2.2x", msg->rsp[m]);
3107 if (msg->rsp_size < 2) {
3108 /* Message is too small to be correct. */
/* NOTE(review): "to small a message" in the string below should read
   "too small a message" (string left unchanged here; it is runtime
   output and this pass only adds comments). */
3109 printk(KERN_WARNING PFX "BMC returned to small a message"
3110 " for netfn %x cmd %x, got %d bytes\n",
3111 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3113 /* Generate an error response for the message. */
3114 msg->rsp[0] = msg->data[0] | (1 << 2);
3115 msg->rsp[1] = msg->data[1];
3116 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
/* A valid response echoes the request's netfn with the response bit
   set and the same command byte; anything else is fabricated into an
   IPMI_ERR_UNSPECIFIED error response. */
3118 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3119 || (msg->rsp[1] != msg->data[1])) /* Command */
3121 /* The response is not even marginally correct. */
3122 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3123 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3124 (msg->data[0] >> 2) | 1, msg->data[1],
3125 msg->rsp[0] >> 2, msg->rsp[1]);
3127 /* Generate an error response for the message. */
3128 msg->rsp[0] = msg->data[0] | (1 << 2);
3129 msg->rsp[1] = msg->data[1];
3130 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3134 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3135 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3136 && (msg->user_data != NULL))
3138 /* It's a response to a response we sent. For this we
3139 deliver a send message response to the user. */
3140 struct ipmi_recv_msg *recv_msg = msg->user_data;
3143 if (msg->rsp_size < 2)
3144 /* Message is too small to be correct. */
3147 chan = msg->data[2] & 0x0f;
3148 if (chan >= IPMI_MAX_CHANNELS)
3149 /* Invalid channel number */
3155 /* Make sure the user still exists. */
3156 if (!recv_msg->user || !recv_msg->user->valid)
/* Deliver only the completion code of the Send Message command. */
3159 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3160 recv_msg->msg.data = recv_msg->msg_data;
3161 recv_msg->msg.data_len = 1;
3162 recv_msg->msg_data[0] = msg->rsp[2];
3163 deliver_response(recv_msg);
3164 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3165 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3167 /* It's from the receive queue. */
3168 chan = msg->rsp[3] & 0xf;
3169 if (chan >= IPMI_MAX_CHANNELS) {
3170 /* Invalid channel number */
/* Route by the medium of the channel the message came in on. */
3175 switch (intf->channels[chan].medium) {
3176 case IPMI_CHANNEL_MEDIUM_IPMB:
/* Bit 2 of the netfn byte set => it is a response. */
3177 if (msg->rsp[4] & 0x04) {
3178 /* It's a response, so find the
3179 requesting message and send it up. */
3180 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3182 /* It's a command to the SMS from some other
3183 entity. Handle that. */
3184 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3188 case IPMI_CHANNEL_MEDIUM_8023LAN:
3189 case IPMI_CHANNEL_MEDIUM_ASYNC:
3190 if (msg->rsp[6] & 0x04) {
3191 /* It's a response, so find the
3192 requesting message and send it up. */
3193 requeue = handle_lan_get_msg_rsp(intf, msg);
3195 /* It's a command to the SMS from some other
3196 entity. Handle that. */
3197 requeue = handle_lan_get_msg_cmd(intf, msg);
3202 /* We don't handle the channel type, so just
3203 * free the message. */
3207 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3208 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3210 /* It's an asyncronous event. */
3211 requeue = handle_read_event_rsp(intf, msg);
3213 /* It's a response from the local BMC. */
3214 requeue = handle_bmc_rsp(intf, msg);
3221 /* Handle a new message from the lower layer. */
/*
 * Entry point called by the SMI (lower) layer for every completed
 * message.  Local responses to Send Message commands are intercepted
 * to drive the sequence-table timers/errors; everything else goes
 * through handle_new_recv_msg(), with intf->waiting_msgs used to keep
 * delivery in order when a message cannot be handled immediately.
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
3222 void ipmi_smi_msg_received(ipmi_smi_t intf,
3223 struct ipmi_smi_msg *msg)
3225 unsigned long flags;
3229 if ((msg->data_size >= 2)
3230 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3231 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3232 && (msg->user_data == NULL))
3234 /* This is the local response to a command send, start
3235 the timer for these. The user_data will not be
3236 NULL if this is a response send, and we will let
3237 response sends just go through. */
3239 /* Check for errors, if we get certain errors (ones
3240 that mean basically we can try again later), we
3241 ignore them and start the timer. Otherwise we
3242 report the error immediately. */
3243 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3244 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3245 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
3247 int chan = msg->rsp[3] & 0xf;
3249 /* Got an error sending the message, handle it. */
3250 spin_lock_irqsave(&intf->counter_lock, flags);
3251 if (chan >= IPMI_MAX_CHANNELS)
3252 ; /* This shouldn't happen */
3253 else if ((intf->channels[chan].medium
3254 == IPMI_CHANNEL_MEDIUM_8023LAN)
3255 || (intf->channels[chan].medium
3256 == IPMI_CHANNEL_MEDIUM_ASYNC))
3257 intf->sent_lan_command_errs++;
3259 intf->sent_ipmb_command_errs++;
3260 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Fail the pending sequence with the BMC's completion code. */
3261 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3263 /* The message was sent, start the timer. */
3264 intf_start_seq_timer(intf, msg->msgid);
/* Either way the local Send Message response is consumed here. */
3267 ipmi_free_smi_msg(msg);
3271 /* To preserve message order, if the list is not empty, we
3272 tack this message onto the end of the list. */
3273 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3274 if (!list_empty(&intf->waiting_msgs)) {
3275 list_add_tail(&msg->link, &intf->waiting_msgs);
3276 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3279 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3281 rv = handle_new_recv_msg(intf, msg);
/* rv == 1 means "requeue"; the periodic timer retries waiting_msgs. */
3283 /* Could not handle the message now, just add it to a
3284 list to handle later. */
3285 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3286 list_add_tail(&msg->link, &intf->waiting_msgs);
3287 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3288 } else if (rv == 0) {
3289 ipmi_free_smi_msg(msg);
/*
 * Notify every user of this interface that registered a watchdog
 * pretimeout handler.  Iteration is over the RCU-protected user list;
 * the surrounding rcu_read_lock/unlock appear to be in elided lines.
 */
3296 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3301 list_for_each_entry_rcu(user, &intf->users, link) {
3302 if (!user->handler->ipmi_watchdog_pretimeout)
3305 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * Turn a timed-out request into a synthetic response carrying the
 * IPMI timeout completion code and deliver it to the waiting user.
 */
3311 handle_msg_timeout(struct ipmi_recv_msg *msg)
3313 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3314 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3315 msg->msg.netfn |= 1; /* Convert to a response. */
3316 msg->msg.data_len = 1;
3317 msg->msg.data = msg->msg_data;
3318 deliver_response(msg);
/*
 * Build a fresh SMI message from a pending receive message so it can
 * be retransmitted; the sequence number/id pair is encoded into msgid.
 * Returns NULL on allocation failure (caller relies on retries).
 * NOTE(review): the NULL-check and return lines appear elided here.
 */
3321 static struct ipmi_smi_msg *
3322 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3323 unsigned char seq, long seqid)
3325 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3327 /* If we can't allocate the message, then just return, we
3328 get 4 retries, so this should be ok. */
3331 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3332 smi_msg->data_size = recv_msg->msg.data_len;
3333 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
/* Debug dump of the outgoing retransmission (compiled conditionally). */
3339 for (m = 0; m < smi_msg->data_size; m++)
3340 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period.  When the entry
 * expires: if no retries remain, move its recv_msg to the caller's
 * timeouts list (delivered as errors later); otherwise retransmit,
 * dropping intf->seq_lock around the lower-layer send.
 * Called with intf->seq_lock held (flags belongs to that acquisition).
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
3347 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3348 struct list_head *timeouts, long timeout_period,
3349 int slot, unsigned long *flags)
3351 struct ipmi_recv_msg *msg;
3356 ent->timeout -= timeout_period;
3357 if (ent->timeout > 0)
3360 if (ent->retries_left == 0) {
3361 /* The message has used all its retries. */
3363 msg = ent->recv_msg;
3364 list_add_tail(&msg->link, timeouts);
3365 spin_lock(&intf->counter_lock);
3367 intf->timed_out_ipmb_broadcasts++;
3368 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3369 intf->timed_out_lan_commands++;
3371 intf->timed_out_ipmb_commands++;
3372 spin_unlock(&intf->counter_lock);
3374 struct ipmi_smi_msg *smi_msg;
3375 /* More retries, send again. */
3377 /* Start with the max timer, set to normal
3378 timer after the message is sent. */
3379 ent->timeout = MAX_MSG_TIMEOUT;
3380 ent->retries_left--;
3381 spin_lock(&intf->counter_lock);
3382 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3383 intf->retransmitted_lan_commands++;
3385 intf->retransmitted_ipmb_commands++;
3386 spin_unlock(&intf->counter_lock);
3388 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* seq_lock is released while calling into the lower layer so the
   sender may sleep/recurse without deadlocking, then re-taken. */
3393 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3394 /* Send the new message. We send with a zero
3395 * priority. It timed out, I doubt time is
3396 * that critical now, and high priority
3397 * messages are really only for messages to the
3398 * local MC, which don't get resent. */
3399 intf->handlers->sender(intf->send_info,
3401 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic work run from the ipmi_timeout timer: for every registered
 * interface, retry any messages parked on waiting_msgs, age all
 * sequence-table entries, and deliver timeout errors for the ones
 * that expired.  Each interface is pinned with kref_get while
 * interfaces_lock is dropped to do the per-interface work.
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
3405 static void ipmi_timeout_handler(long timeout_period)
3408 struct list_head timeouts;
3409 struct ipmi_recv_msg *msg, *msg2;
3410 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3411 unsigned long flags;
3414 INIT_LIST_HEAD(&timeouts);
3416 spin_lock(&interfaces_lock);
3417 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3418 intf = ipmi_interfaces[i];
3419 if (IPMI_INVALID_INTERFACE(intf))
3421 kref_get(&intf->refcount);
3422 spin_unlock(&interfaces_lock);
3424 /* See if any waiting messages need to be processed. */
3425 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3426 list_for_each_entry_safe(smi_msg, smi_msg2,
3427 &intf->waiting_msgs, link) {
3428 if (!handle_new_recv_msg(intf, smi_msg)) {
3429 list_del(&smi_msg->link);
3430 ipmi_free_smi_msg(smi_msg);
3432 /* To preserve message order, quit if we
3433 can't handle a message. */
3437 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3439 /* Go through the seq table and find any messages that
3440 have timed out, putting them in the timeouts
3442 spin_lock_irqsave(&intf->seq_lock, flags);
3443 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
3444 check_msg_timeout(intf, &(intf->seq_table[j]),
3445 &timeouts, timeout_period, j,
3447 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Deliver timeout errors outside seq_lock. */
3449 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3450 handle_msg_timeout(msg);
3452 kref_put(&intf->refcount, intf_free);
3453 spin_lock(&interfaces_lock);
3455 spin_unlock(&interfaces_lock);
/*
 * Ask the lower layer of every valid interface to fetch any pending
 * events from the BMC (run periodically from the timer).
 */
3458 static void ipmi_request_event(void)
3463 spin_lock(&interfaces_lock);
3464 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3465 intf = ipmi_interfaces[i];
3466 if (IPMI_INVALID_INTERFACE(intf))
3469 intf->handlers->request_events(intf->send_info);
3471 spin_unlock(&interfaces_lock);
/* Periodic driver timer; armed in ipmi_init_msghandler() and re-armed
   by ipmi_timeout() until stop_operation is set at module exit. */
3474 static struct timer_list ipmi_timer;
3476 /* Call every ~100 ms. */
3477 #define IPMI_TIMEOUT_TIME 100
3479 /* How many jiffies does it take to get to the timeout time. */
3480 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3482 /* Request events from the queue every second (this is the number of
3483 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3484 future, IPMI will add a way to know immediately if an event is in
3485 the queue and this silliness can go away. */
3486 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* stop_operation: set non-zero to keep the timer from re-arming.
   ticks_to_req_ev: countdown of timer ticks until the next event poll. */
3488 static atomic_t stop_operation;
3489 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback: bail out if shutdown is in progress, poll for
 * events roughly once a second, run the timeout handler every tick,
 * then re-arm the timer.
 */
3491 static void ipmi_timeout(unsigned long data)
3493 if (atomic_read(&stop_operation))
3497 if (ticks_to_req_ev == 0) {
3498 ipmi_request_event();
3499 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3502 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3504 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Outstanding-message counters, checked at module exit to detect
   leaked SMI/receive message buffers. */
3508 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3509 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3511 /* FIXME - convert these to slabs. */
/* Default ->done handler for SMI messages: drop the in-use count
   (the kfree of the message appears to be in an elided line). */
3512 static void free_smi_msg(struct ipmi_smi_msg *msg)
3514 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an SMI message (GFP_ATOMIC: callable from interrupt
 * context).  On success the default done handler and a clean
 * user_data are installed and the in-use counter is bumped; the
 * NULL-check branch appears to be in elided lines.
 */
3518 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3520 struct ipmi_smi_msg *rv;
3521 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3523 rv->done = free_smi_msg;
3524 rv->user_data = NULL;
3525 atomic_inc(&smi_msg_inuse_count);
/* Default ->done handler for receive messages: drop the in-use count
   (the kfree of the message appears to be in an elided line). */
3530 static void free_recv_msg(struct ipmi_recv_msg *msg)
3532 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a receive message (GFP_ATOMIC), install the default done
 * handler, and bump the in-use counter.  The NULL-check branch
 * appears to be in elided lines.
 */
3536 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3538 struct ipmi_recv_msg *rv;
3540 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3543 rv->done = free_recv_msg;
3544 atomic_inc(&recv_msg_inuse_count);
/* Release a receive message: drop the owning user's refcount (the
   guard for a NULL user and the ->done() call appear to be in
   elided lines). */
3549 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3552 kref_put(&msg->user->refcount, free_user);
3556 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op ->done handlers: the panic path uses on-stack messages that
   must never be kfree'd. */
3558 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3562 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3566 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic: when a successful Get Event
 * Receiver response comes back, record the receiver's address and LUN
 * on the interface so panic strings can be routed to it.
 */
3567 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3569 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3570 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3571 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3572 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3574 /* A get event receiver command, save it. */
3575 intf->event_receiver = msg->msg.data[1];
3576 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic: when a successful Get Device ID
 * response comes back, record whether the local MC is an SEL device
 * and/or an event generator (bits of the additional-device-support
 * byte, msg.data[6]).
 */
3580 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3582 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3583 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3584 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3585 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3587 /* A get device id command, save if we are an event
3588 receiver or generator. */
3589 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3590 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * Called from the panic notifier: synchronously (run-to-completion)
 * send an "OS Critical Stop" platform event on every interface, and,
 * when CONFIG_IPMI_PANIC_STRING is set, chop the panic string into
 * 11-byte pieces and log them as OEM SEL entries — to the discovered
 * event receiver over IPMB if one exists, else to the local SEL.
 * Uses on-stack smi/recv messages with no-op done handlers since the
 * allocator cannot be trusted during a panic.
 * NOTE(review): excerpt appears elided (many i_ipmi_request arguments
 * missing); comments cover visible logic only.
 */
3595 static void send_panic_events(char *str)
3597 struct kernel_ipmi_msg msg;
3599 unsigned char data[16];
3601 struct ipmi_system_interface_addr *si;
3602 struct ipmi_addr addr;
3603 struct ipmi_smi_msg smi_msg;
3604 struct ipmi_recv_msg recv_msg;
3606 si = (struct ipmi_system_interface_addr *) &addr;
3607 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3608 si->channel = IPMI_BMC_CHANNEL;
3611 /* Fill in an event telling that we have failed. */
3612 msg.netfn = 0x04; /* Sensor or Event. */
3613 msg.cmd = 2; /* Platform event command. */
3616 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3617 data[1] = 0x03; /* This is for IPMI 1.0. */
3618 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3619 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3620 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3622 /* Put a few breadcrumbs in. Hopefully later we can add more things
3623 to make the panic events more useful. */
/* On-stack messages must not be freed; use no-op done handlers. */
3630 smi_msg.done = dummy_smi_done_handler;
3631 recv_msg.done = dummy_recv_done_handler;
3633 /* For every registered interface, send the event. */
3634 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3635 intf = ipmi_interfaces[i];
3636 if (IPMI_INVALID_INTERFACE(intf))
3639 /* Send the event announcing the panic. */
/* Force synchronous operation — interrupts may be dead by now. */
3640 intf->handlers->set_run_to_completion(intf->send_info, 1);
3641 i_ipmi_request(NULL,
3650 intf->channels[0].address,
3651 intf->channels[0].lun,
3652 0, 1); /* Don't retry, and don't wait. */
3655 #ifdef CONFIG_IPMI_PANIC_STRING
3656 /* On every interface, dump a bunch of OEM event holding the
3661 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3663 struct ipmi_ipmb_addr *ipmb;
3666 intf = ipmi_interfaces[i];
3667 if (IPMI_INVALID_INTERFACE(intf))
3670 /* First job here is to figure out where to send the
3671 OEM events. There's no way in IPMI to send OEM
3672 events using an event send command, so we have to
3673 find the SEL to put them in and stick them in
3676 /* Get capabilities from the get device id. */
3677 intf->local_sel_device = 0;
3678 intf->local_event_generator = 0;
3679 intf->event_receiver = 0;
3681 /* Request the device info from the local MC. */
3682 msg.netfn = IPMI_NETFN_APP_REQUEST;
3683 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* Responses land in device_id_fetcher via null_user_handler. */
3686 intf->null_user_handler = device_id_fetcher;
3687 i_ipmi_request(NULL,
3696 intf->channels[0].address,
3697 intf->channels[0].lun,
3698 0, 1); /* Don't retry, and don't wait. */
3700 if (intf->local_event_generator) {
3701 /* Request the event receiver from the local MC. */
3702 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3703 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3706 intf->null_user_handler = event_receiver_fetcher;
3707 i_ipmi_request(NULL,
3716 intf->channels[0].address,
3717 intf->channels[0].lun,
3718 0, 1); /* no retry, and no wait. */
3720 intf->null_user_handler = NULL;
3722 /* Validate the event receiver. The low bit must not
3723 be 1 (it must be a valid IPMB address), it cannot
3724 be zero, and it must not be my address. */
3725 if (((intf->event_receiver & 1) == 0)
3726 && (intf->event_receiver != 0)
3727 && (intf->event_receiver != intf->channels[0].address))
3729 /* The event receiver is valid, send an IPMB
3731 ipmb = (struct ipmi_ipmb_addr *) &addr;
3732 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3733 ipmb->channel = 0; /* FIXME - is this right? */
3734 ipmb->lun = intf->event_receiver_lun;
3735 ipmb->slave_addr = intf->event_receiver;
3736 } else if (intf->local_sel_device) {
3737 /* The event receiver was not valid (or was
3738 me), but I am an SEL device, just dump it
3740 si = (struct ipmi_system_interface_addr *) &addr;
3741 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3742 si->channel = IPMI_BMC_CHANNEL;
3745 continue; /* No where to send the event. */
3748 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3749 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
/* Walk the panic string 11 bytes at a time, one SEL entry each. */
3755 int size = strlen(p);
3761 data[2] = 0xf0; /* OEM event without timestamp. */
3762 data[3] = intf->channels[0].address;
3763 data[4] = j++; /* sequence # */
3764 /* Always give 11 bytes, so strncpy will fill
3765 it with zeroes for me. */
3766 strncpy(data+5, p, 11);
3769 i_ipmi_request(NULL,
3778 intf->channels[0].address,
3779 intf->channels[0].lun,
3780 0, 1); /* no retry, and no wait. */
3783 #endif /* CONFIG_IPMI_PANIC_STRING */
3785 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Guard so the panic work runs only once even if the notifier fires
   again. */
3787 static int has_panicked = 0;
/*
 * Panic-notifier callback: switch every interface to synchronous
 * (run-to-completion) mode and, when configured, emit panic events.
 */
3789 static int panic_event(struct notifier_block *this,
3790 unsigned long event,
3800 /* For every registered interface, set it to run to completion. */
3801 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3802 intf = ipmi_interfaces[i];
3803 if (IPMI_INVALID_INTERFACE(intf))
3806 intf->handlers->set_run_to_completion(intf->send_info, 1);
3809 #ifdef CONFIG_IPMI_PANIC_EVENT
3810 send_panic_events(ptr);
/* Registered on panic_notifier_list at init; high priority so IPMI
   logging runs before most other panic handlers. */
3816 static struct notifier_block panic_block = {
3817 .notifier_call = panic_event,
3819 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time initialization of the message handler: register the
 * driver, clear the interface table, create /proc/ipmi, start the
 * periodic timer, and hook the panic notifier.  Returns 0 on success
 * or a negative errno (driver_register failure visible here).
 * NOTE(review): excerpt appears elided; comments cover visible logic only.
 */
3822 static int ipmi_init_msghandler(void)
3830 rv = driver_register(&ipmidriver);
3832 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3836 printk(KERN_INFO "ipmi message handler version "
3837 IPMI_DRIVER_VERSION "\n");
3839 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3840 ipmi_interfaces[i] = NULL;
3842 #ifdef CONFIG_PROC_FS
3843 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3844 if (!proc_ipmi_root) {
3845 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3849 proc_ipmi_root->owner = THIS_MODULE;
3850 #endif /* CONFIG_PROC_FS */
3852 setup_timer(&ipmi_timer, ipmi_timeout, 0);
3853 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
3855 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: just runs the (idempotent) core init. */
3862 static __init int ipmi_init_msghandler_mod(void)
3864 ipmi_init_msghandler();
/*
 * Module exit: unhook the panic notifier, stop the periodic timer
 * (stop_operation keeps ipmi_timeout from re-arming, then
 * del_timer_sync waits out any running instance), tear down /proc,
 * unregister the driver, and warn about leaked message buffers.
 */
3868 static __exit void cleanup_ipmi(void)
3875 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3877 /* This can't be called if any interfaces exist, so no worry about
3878 shutting down the interfaces. */
3880 /* Tell the timer to stop, then wait for it to stop. This avoids
3881 problems with race conditions removing the timer here. */
3882 atomic_inc(&stop_operation);
3883 del_timer_sync(&ipmi_timer);
3885 #ifdef CONFIG_PROC_FS
3886 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3887 #endif /* CONFIG_PROC_FS */
3889 driver_unregister(&ipmidriver);
3893 /* Check for buffer leaks. */
3894 count = atomic_read(&smi_msg_inuse_count);
3896 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3898 count = atomic_read(&recv_msg_inuse_count);
3900 printk(KERN_WARNING PFX "recv message count %d at exit\n",
3903 module_exit(cleanup_ipmi);
3905 module_init(ipmi_init_msghandler_mod);
3906 MODULE_LICENSE("GPL");
3907 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3908 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3909 MODULE_VERSION(IPMI_DRIVER_VERSION);
3911 EXPORT_SYMBOL(ipmi_create_user);
3912 EXPORT_SYMBOL(ipmi_destroy_user);
3913 EXPORT_SYMBOL(ipmi_get_version);
3914 EXPORT_SYMBOL(ipmi_request_settime);
3915 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3916 EXPORT_SYMBOL(ipmi_register_smi);
3917 EXPORT_SYMBOL(ipmi_unregister_smi);
3918 EXPORT_SYMBOL(ipmi_register_for_cmd);
3919 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3920 EXPORT_SYMBOL(ipmi_smi_msg_received);
3921 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3922 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3923 EXPORT_SYMBOL(ipmi_addr_length);
3924 EXPORT_SYMBOL(ipmi_validate_addr);
3925 EXPORT_SYMBOL(ipmi_set_gets_events);
3926 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3927 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3928 EXPORT_SYMBOL(ipmi_set_my_address);
3929 EXPORT_SYMBOL(ipmi_get_my_address);
3930 EXPORT_SYMBOL(ipmi_set_my_LUN);
3931 EXPORT_SYMBOL(ipmi_get_my_LUN);
3932 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3933 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3934 EXPORT_SYMBOL(ipmi_free_recv_msg);