4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38 #include <linux/sched.h>
39 #include <linux/poll.h>
40 #include <linux/spinlock.h>
41 #include <linux/slab.h>
42 #include <linux/ipmi.h>
43 #include <linux/ipmi_smi.h>
44 #include <linux/notifier.h>
45 #include <linux/init.h>
46 #include <linux/proc_fs.h>
47 #include <linux/rcupdate.h>
49 #define PFX "IPMI message handler: "
51 #define IPMI_DRIVER_VERSION "39.0"
53 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54 static int ipmi_init_msghandler(void);
56 static int initialized = 0;
59 struct proc_dir_entry *proc_ipmi_root = NULL;
60 EXPORT_SYMBOL(proc_ipmi_root);
61 #endif /* CONFIG_PROC_FS */
63 #define MAX_EVENTS_IN_QUEUE 25
65 /* Don't let a message sit in a queue forever, always time it with at lest
66 the max message timer. This is in milliseconds. */
67 #define MAX_MSG_TIMEOUT 60000
71 * The main "user" data structure.
75 struct list_head link;
77 /* Set to "0" when the user is destroyed. */
82 /* The upper layer that handles receive messages. */
83 struct ipmi_user_hndl *handler;
86 /* The interface this user is bound to. */
89 /* Does this interface receive IPMI events? */
95 struct list_head link;
102 * This is used to form a linked lised during mass deletion.
103 * Since this is in an RCU list, we cannot use the link above
104 * or change any data until the RCU period completes. So we
105 * use this next variable during mass deletion so we can have
106 * a list and don't have to wait and restart the search on
107 * every individual deletion of a command. */
108 struct cmd_rcvr *next;
113 unsigned int inuse : 1;
114 unsigned int broadcast : 1;
116 unsigned long timeout;
117 unsigned long orig_timeout;
118 unsigned int retries_left;
120 /* To verify on an incoming send message response that this is
121 the message that the response is for, we keep a sequence id
122 and increment it every time we send a message. */
125 /* This is held so we can properly respond to the message on a
126 timeout, and it is used to hold the temporary data for
127 retransmission, too. */
128 struct ipmi_recv_msg *recv_msg;
131 /* Store the information in a msgid (long) to allow us to find a
132 sequence table entry from the msgid. */
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  Layout: bits 26..31 hold the
   sequence table index (always < IPMI_IPMB_NUM_SEQ == 64, so 6 bits
   suffice), the low bits hold the per-entry sequence id.
   NOTE(review): the store mask (0x3ffffff) is wider than the 22-bit
   mask used by GET_SEQ_FROM_MSGID/NEXT_SEQID; harmless today because
   NEXT_SEQID never produces an id above 0x3fffff, but keep the three
   macros in sync if the width ever changes. */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
        ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)           \
        do {                                            \
                seq = (((msgid) >> 26) & 0x3f);         \
                seqid = ((msgid) & 0x3fffff);           \
        } while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
145 unsigned char medium;
146 unsigned char protocol;
148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
149 but may be changed by the user. */
150 unsigned char address;
152 /* My LUN. This should generally stay the SMS LUN, but just in
157 #ifdef CONFIG_PROC_FS
158 struct ipmi_proc_entry
161 struct ipmi_proc_entry *next;
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
172 struct kref refcount;
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
187 #define IPMI_IPMB_NUM_SEQ 64
188 #define IPMI_MAX_CHANNELS 16
191 /* What interface number are we? */
194 struct kref refcount;
196 /* The list of upper layers that are using me. seq_lock
198 struct list_head users;
200 /* Used for wake ups at startup. */
201 wait_queue_head_t waitq;
203 struct bmc_device *bmc;
206 /* This is the lower-layer's sender routine. */
207 struct ipmi_smi_handlers *handlers;
210 #ifdef CONFIG_PROC_FS
211 /* A list of proc entries for this interface. This does not
212 need a lock, only one thread creates it and only one thread
214 spinlock_t proc_entry_lock;
215 struct ipmi_proc_entry *proc_entries;
218 /* Driver-model device for the system interface. */
219 struct device *si_dev;
221 /* A table of sequence numbers for this interface. We use the
222 sequence numbers for IPMB messages that go out of the
223 interface to match them up with their responses. A routine
224 is called periodically to time the items in this list. */
226 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
229 /* Messages that were delayed for some reason (out of memory,
230 for instance), will go in here to be processed later in a
231 periodic timer interrupt. */
232 spinlock_t waiting_msgs_lock;
233 struct list_head waiting_msgs;
235 /* The list of command receivers that are registered for commands
236 on this interface. */
237 struct semaphore cmd_rcvrs_lock;
238 struct list_head cmd_rcvrs;
240 /* Events that were queues because no one was there to receive
242 spinlock_t events_lock; /* For dealing with event stuff. */
243 struct list_head waiting_events;
244 unsigned int waiting_events_count; /* How many events in queue? */
246 /* The event receiver for my BMC, only really used at panic
247 shutdown as a place to store this. */
248 unsigned char event_receiver;
249 unsigned char event_receiver_lun;
250 unsigned char local_sel_device;
251 unsigned char local_event_generator;
253 /* A cheap hack, if this is non-null and a message to an
254 interface comes in with a NULL user, call this routine with
255 it. Note that the message will still be freed by the
256 caller. This only works on the system interface. */
257 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
259 /* When we are scanning the channels for an SMI, this will
260 tell which channel we are scanning. */
263 /* Channel information */
264 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
267 struct proc_dir_entry *proc_dir;
268 char proc_dir_name[10];
270 spinlock_t counter_lock; /* For making counters atomic. */
272 /* Commands we got that were invalid. */
273 unsigned int sent_invalid_commands;
275 /* Commands we sent to the MC. */
276 unsigned int sent_local_commands;
277 /* Responses from the MC that were delivered to a user. */
278 unsigned int handled_local_responses;
279 /* Responses from the MC that were not delivered to a user. */
280 unsigned int unhandled_local_responses;
282 /* Commands we sent out to the IPMB bus. */
283 unsigned int sent_ipmb_commands;
284 /* Commands sent on the IPMB that had errors on the SEND CMD */
285 unsigned int sent_ipmb_command_errs;
286 /* Each retransmit increments this count. */
287 unsigned int retransmitted_ipmb_commands;
288 /* When a message times out (runs out of retransmits) this is
290 unsigned int timed_out_ipmb_commands;
292 /* This is like above, but for broadcasts. Broadcasts are
293 *not* included in the above count (they are expected to
295 unsigned int timed_out_ipmb_broadcasts;
297 /* Responses I have sent to the IPMB bus. */
298 unsigned int sent_ipmb_responses;
300 /* The response was delivered to the user. */
301 unsigned int handled_ipmb_responses;
302 /* The response had invalid data in it. */
303 unsigned int invalid_ipmb_responses;
304 /* The response didn't have anyone waiting for it. */
305 unsigned int unhandled_ipmb_responses;
307 /* Commands we sent out to the IPMB bus. */
308 unsigned int sent_lan_commands;
309 /* Commands sent on the IPMB that had errors on the SEND CMD */
310 unsigned int sent_lan_command_errs;
311 /* Each retransmit increments this count. */
312 unsigned int retransmitted_lan_commands;
313 /* When a message times out (runs out of retransmits) this is
315 unsigned int timed_out_lan_commands;
317 /* Responses I have sent to the IPMB bus. */
318 unsigned int sent_lan_responses;
320 /* The response was delivered to the user. */
321 unsigned int handled_lan_responses;
322 /* The response had invalid data in it. */
323 unsigned int invalid_lan_responses;
324 /* The response didn't have anyone waiting for it. */
325 unsigned int unhandled_lan_responses;
327 /* The command was delivered to the user. */
328 unsigned int handled_commands;
329 /* The command had invalid data in it. */
330 unsigned int invalid_commands;
331 /* The command didn't have anyone waiting for it. */
332 unsigned int unhandled_commands;
334 /* Invalid data in an event. */
335 unsigned int invalid_events;
336 /* Events that were received with the proper format. */
339 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
341 /* Used to mark an interface entry that cannot be used but is not a
342 * free entry, either, primarily used at creation and deletion time so
343 * a slot doesn't get reused too quickly. */
344 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
345 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
346 || (i == IPMI_INVALID_INTERFACE_ENTRY))
349 * The driver model view of the IPMI messaging driver.
351 static struct device_driver ipmidriver = {
353 .bus = &platform_bus_type
355 static DEFINE_MUTEX(ipmidriver_mutex);
357 #define MAX_IPMI_INTERFACES 4
358 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
360 /* Directly protects the ipmi_interfaces data structure. */
361 static DEFINE_SPINLOCK(interfaces_lock);
363 /* List of watchers that want to know when smi's are added and
365 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
366 static DECLARE_RWSEM(smi_watchers_sem);
369 static void free_recv_msg_list(struct list_head *q)
371 struct ipmi_recv_msg *msg, *msg2;
373 list_for_each_entry_safe(msg, msg2, q, link) {
374 list_del(&msg->link);
375 ipmi_free_recv_msg(msg);
379 static void clean_up_interface_data(ipmi_smi_t intf)
382 struct cmd_rcvr *rcvr, *rcvr2;
383 struct list_head list;
385 free_recv_msg_list(&intf->waiting_msgs);
386 free_recv_msg_list(&intf->waiting_events);
388 /* Wholesale remove all the entries from the list in the
389 * interface and wait for RCU to know that none are in use. */
390 down(&intf->cmd_rcvrs_lock);
391 list_add_rcu(&list, &intf->cmd_rcvrs);
392 list_del_rcu(&intf->cmd_rcvrs);
393 up(&intf->cmd_rcvrs_lock);
396 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
399 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
400 if ((intf->seq_table[i].inuse)
401 && (intf->seq_table[i].recv_msg))
403 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
408 static void intf_free(struct kref *ref)
410 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
412 clean_up_interface_data(intf);
416 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
421 down_write(&smi_watchers_sem);
422 list_add(&(watcher->link), &smi_watchers);
423 up_write(&smi_watchers_sem);
424 spin_lock_irqsave(&interfaces_lock, flags);
425 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
426 ipmi_smi_t intf = ipmi_interfaces[i];
427 if (IPMI_INVALID_INTERFACE(intf))
429 spin_unlock_irqrestore(&interfaces_lock, flags);
430 watcher->new_smi(i, intf->si_dev);
431 spin_lock_irqsave(&interfaces_lock, flags);
433 spin_unlock_irqrestore(&interfaces_lock, flags);
437 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
439 down_write(&smi_watchers_sem);
440 list_del(&(watcher->link));
441 up_write(&smi_watchers_sem);
446 call_smi_watchers(int i, struct device *dev)
448 struct ipmi_smi_watcher *w;
450 down_read(&smi_watchers_sem);
451 list_for_each_entry(w, &smi_watchers, link) {
452 if (try_module_get(w->owner)) {
454 module_put(w->owner);
457 up_read(&smi_watchers_sem);
461 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
463 if (addr1->addr_type != addr2->addr_type)
466 if (addr1->channel != addr2->channel)
469 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
470 struct ipmi_system_interface_addr *smi_addr1
471 = (struct ipmi_system_interface_addr *) addr1;
472 struct ipmi_system_interface_addr *smi_addr2
473 = (struct ipmi_system_interface_addr *) addr2;
474 return (smi_addr1->lun == smi_addr2->lun);
477 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
478 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
480 struct ipmi_ipmb_addr *ipmb_addr1
481 = (struct ipmi_ipmb_addr *) addr1;
482 struct ipmi_ipmb_addr *ipmb_addr2
483 = (struct ipmi_ipmb_addr *) addr2;
485 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
486 && (ipmb_addr1->lun == ipmb_addr2->lun));
489 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
490 struct ipmi_lan_addr *lan_addr1
491 = (struct ipmi_lan_addr *) addr1;
492 struct ipmi_lan_addr *lan_addr2
493 = (struct ipmi_lan_addr *) addr2;
495 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
496 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
497 && (lan_addr1->session_handle
498 == lan_addr2->session_handle)
499 && (lan_addr1->lun == lan_addr2->lun));
505 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
507 if (len < sizeof(struct ipmi_system_interface_addr)) {
511 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
512 if (addr->channel != IPMI_BMC_CHANNEL)
517 if ((addr->channel == IPMI_BMC_CHANNEL)
518 || (addr->channel >= IPMI_MAX_CHANNELS)
519 || (addr->channel < 0))
522 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
523 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
525 if (len < sizeof(struct ipmi_ipmb_addr)) {
531 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
532 if (len < sizeof(struct ipmi_lan_addr)) {
541 unsigned int ipmi_addr_length(int addr_type)
543 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
544 return sizeof(struct ipmi_system_interface_addr);
546 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
547 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
549 return sizeof(struct ipmi_ipmb_addr);
552 if (addr_type == IPMI_LAN_ADDR_TYPE)
553 return sizeof(struct ipmi_lan_addr);
558 static void deliver_response(struct ipmi_recv_msg *msg)
561 ipmi_smi_t intf = msg->user_msg_data;
564 /* Special handling for NULL users. */
565 if (intf->null_user_handler) {
566 intf->null_user_handler(intf, msg);
567 spin_lock_irqsave(&intf->counter_lock, flags);
568 intf->handled_local_responses++;
569 spin_unlock_irqrestore(&intf->counter_lock, flags);
571 /* No handler, so give up. */
572 spin_lock_irqsave(&intf->counter_lock, flags);
573 intf->unhandled_local_responses++;
574 spin_unlock_irqrestore(&intf->counter_lock, flags);
576 ipmi_free_recv_msg(msg);
578 ipmi_user_t user = msg->user;
579 user->handler->ipmi_recv_hndl(msg, user->handler_data);
583 /* Find the next sequence number not being used and add the given
584 message with the given timeout to the sequence table. This must be
585 called with the interface's seq_lock held. */
586 static int intf_next_seq(ipmi_smi_t intf,
587 struct ipmi_recv_msg *recv_msg,
588 unsigned long timeout,
597 for (i = intf->curr_seq;
598 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
599 i = (i+1)%IPMI_IPMB_NUM_SEQ)
601 if (!intf->seq_table[i].inuse)
605 if (!intf->seq_table[i].inuse) {
606 intf->seq_table[i].recv_msg = recv_msg;
608 /* Start with the maximum timeout, when the send response
609 comes in we will start the real timer. */
610 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
611 intf->seq_table[i].orig_timeout = timeout;
612 intf->seq_table[i].retries_left = retries;
613 intf->seq_table[i].broadcast = broadcast;
614 intf->seq_table[i].inuse = 1;
615 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
617 *seqid = intf->seq_table[i].seqid;
618 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
626 /* Return the receive message for the given sequence number and
627 release the sequence number so it can be reused. Some other data
628 is passed in to be sure the message matches up correctly (to help
629 guard against message coming in after their timeout and the
630 sequence number being reused). */
631 static int intf_find_seq(ipmi_smi_t intf,
636 struct ipmi_addr *addr,
637 struct ipmi_recv_msg **recv_msg)
642 if (seq >= IPMI_IPMB_NUM_SEQ)
645 spin_lock_irqsave(&(intf->seq_lock), flags);
646 if (intf->seq_table[seq].inuse) {
647 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
649 if ((msg->addr.channel == channel)
650 && (msg->msg.cmd == cmd)
651 && (msg->msg.netfn == netfn)
652 && (ipmi_addr_equal(addr, &(msg->addr))))
655 intf->seq_table[seq].inuse = 0;
659 spin_unlock_irqrestore(&(intf->seq_lock), flags);
665 /* Start the timer for a specific sequence table entry. */
666 static int intf_start_seq_timer(ipmi_smi_t intf,
675 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
677 spin_lock_irqsave(&(intf->seq_lock), flags);
678 /* We do this verification because the user can be deleted
679 while a message is outstanding. */
680 if ((intf->seq_table[seq].inuse)
681 && (intf->seq_table[seq].seqid == seqid))
683 struct seq_table *ent = &(intf->seq_table[seq]);
684 ent->timeout = ent->orig_timeout;
687 spin_unlock_irqrestore(&(intf->seq_lock), flags);
692 /* Got an error for the send message for a specific sequence number. */
693 static int intf_err_seq(ipmi_smi_t intf,
701 struct ipmi_recv_msg *msg = NULL;
704 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
706 spin_lock_irqsave(&(intf->seq_lock), flags);
707 /* We do this verification because the user can be deleted
708 while a message is outstanding. */
709 if ((intf->seq_table[seq].inuse)
710 && (intf->seq_table[seq].seqid == seqid))
712 struct seq_table *ent = &(intf->seq_table[seq]);
718 spin_unlock_irqrestore(&(intf->seq_lock), flags);
721 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
722 msg->msg_data[0] = err;
723 msg->msg.netfn |= 1; /* Convert to a response. */
724 msg->msg.data_len = 1;
725 msg->msg.data = msg->msg_data;
726 deliver_response(msg);
733 int ipmi_create_user(unsigned int if_num,
734 struct ipmi_user_hndl *handler,
739 ipmi_user_t new_user;
743 /* There is no module usecount here, because it's not
744 required. Since this can only be used by and called from
745 other modules, they will implicitly use this module, and
746 thus this can't be removed unless the other modules are
752 /* Make sure the driver is actually initialized, this handles
753 problems with initialization order. */
755 rv = ipmi_init_msghandler();
759 /* The init code doesn't return an error if it was turned
760 off, but it won't initialize. Check that. */
765 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
769 spin_lock_irqsave(&interfaces_lock, flags);
770 intf = ipmi_interfaces[if_num];
771 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
772 spin_unlock_irqrestore(&interfaces_lock, flags);
777 /* Note that each existing user holds a refcount to the interface. */
778 kref_get(&intf->refcount);
779 spin_unlock_irqrestore(&interfaces_lock, flags);
781 kref_init(&new_user->refcount);
782 new_user->handler = handler;
783 new_user->handler_data = handler_data;
784 new_user->intf = intf;
785 new_user->gets_events = 0;
787 if (!try_module_get(intf->handlers->owner)) {
792 if (intf->handlers->inc_usecount) {
793 rv = intf->handlers->inc_usecount(intf->send_info);
795 module_put(intf->handlers->owner);
801 spin_lock_irqsave(&intf->seq_lock, flags);
802 list_add_rcu(&new_user->link, &intf->users);
803 spin_unlock_irqrestore(&intf->seq_lock, flags);
808 kref_put(&intf->refcount, intf_free);
814 static void free_user(struct kref *ref)
816 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
820 int ipmi_destroy_user(ipmi_user_t user)
822 ipmi_smi_t intf = user->intf;
825 struct cmd_rcvr *rcvr;
826 struct cmd_rcvr *rcvrs = NULL;
830 /* Remove the user from the interface's sequence table. */
831 spin_lock_irqsave(&intf->seq_lock, flags);
832 list_del_rcu(&user->link);
834 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
835 if (intf->seq_table[i].inuse
836 && (intf->seq_table[i].recv_msg->user == user))
838 intf->seq_table[i].inuse = 0;
841 spin_unlock_irqrestore(&intf->seq_lock, flags);
844 * Remove the user from the command receiver's table. First
845 * we build a list of everything (not using the standard link,
846 * since other things may be using it till we do
847 * synchronize_rcu()) then free everything in that list.
849 down(&intf->cmd_rcvrs_lock);
850 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
851 if (rcvr->user == user) {
852 list_del_rcu(&rcvr->link);
857 up(&intf->cmd_rcvrs_lock);
865 module_put(intf->handlers->owner);
866 if (intf->handlers->dec_usecount)
867 intf->handlers->dec_usecount(intf->send_info);
869 kref_put(&intf->refcount, intf_free);
871 kref_put(&user->refcount, free_user);
876 void ipmi_get_version(ipmi_user_t user,
877 unsigned char *major,
878 unsigned char *minor)
880 *major = ipmi_version_major(&user->intf->bmc->id);
881 *minor = ipmi_version_minor(&user->intf->bmc->id);
884 int ipmi_set_my_address(ipmi_user_t user,
885 unsigned int channel,
886 unsigned char address)
888 if (channel >= IPMI_MAX_CHANNELS)
890 user->intf->channels[channel].address = address;
894 int ipmi_get_my_address(ipmi_user_t user,
895 unsigned int channel,
896 unsigned char *address)
898 if (channel >= IPMI_MAX_CHANNELS)
900 *address = user->intf->channels[channel].address;
904 int ipmi_set_my_LUN(ipmi_user_t user,
905 unsigned int channel,
908 if (channel >= IPMI_MAX_CHANNELS)
910 user->intf->channels[channel].lun = LUN & 0x3;
914 int ipmi_get_my_LUN(ipmi_user_t user,
915 unsigned int channel,
916 unsigned char *address)
918 if (channel >= IPMI_MAX_CHANNELS)
920 *address = user->intf->channels[channel].lun;
924 int ipmi_set_gets_events(ipmi_user_t user, int val)
927 ipmi_smi_t intf = user->intf;
928 struct ipmi_recv_msg *msg, *msg2;
929 struct list_head msgs;
931 INIT_LIST_HEAD(&msgs);
933 spin_lock_irqsave(&intf->events_lock, flags);
934 user->gets_events = val;
937 /* Deliver any queued events. */
938 list_for_each_entry_safe(msg, msg2, &intf->waiting_events,
940 list_del(&msg->link);
941 list_add_tail(&msg->link, &msgs);
945 /* Hold the events lock while doing this to preserve order. */
946 list_for_each_entry_safe(msg, msg2, &msgs, link) {
948 kref_get(&user->refcount);
949 deliver_response(msg);
952 spin_unlock_irqrestore(&intf->events_lock, flags);
957 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
961 struct cmd_rcvr *rcvr;
963 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
964 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
970 int ipmi_register_for_cmd(ipmi_user_t user,
974 ipmi_smi_t intf = user->intf;
975 struct cmd_rcvr *rcvr;
976 struct cmd_rcvr *entry;
980 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
987 down(&intf->cmd_rcvrs_lock);
988 /* Make sure the command/netfn is not already registered. */
989 entry = find_cmd_rcvr(intf, netfn, cmd);
995 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
998 up(&intf->cmd_rcvrs_lock);
1005 int ipmi_unregister_for_cmd(ipmi_user_t user,
1006 unsigned char netfn,
1009 ipmi_smi_t intf = user->intf;
1010 struct cmd_rcvr *rcvr;
1012 down(&intf->cmd_rcvrs_lock);
1013 /* Make sure the command/netfn is not already registered. */
1014 rcvr = find_cmd_rcvr(intf, netfn, cmd);
1015 if ((rcvr) && (rcvr->user == user)) {
1016 list_del_rcu(&rcvr->link);
1017 up(&intf->cmd_rcvrs_lock);
1022 up(&intf->cmd_rcvrs_lock);
1027 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
1029 ipmi_smi_t intf = user->intf;
1030 intf->handlers->set_run_to_completion(intf->send_info, val);
/* Compute the IPMB 2's-complement checksum of @size bytes at @data:
   the value that makes the byte-sum of (data + checksum) equal zero
   modulo 256. */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
        unsigned char csum = 0;

        for (; size > 0; size--, data++)
                csum += *data;

        return -csum;
}
1044 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
1045 struct kernel_ipmi_msg *msg,
1046 struct ipmi_ipmb_addr *ipmb_addr,
1048 unsigned char ipmb_seq,
1050 unsigned char source_address,
1051 unsigned char source_lun)
1055 /* Format the IPMB header data. */
1056 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1057 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1058 smi_msg->data[2] = ipmb_addr->channel;
1060 smi_msg->data[3] = 0;
1061 smi_msg->data[i+3] = ipmb_addr->slave_addr;
1062 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
1063 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
1064 smi_msg->data[i+6] = source_address;
1065 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
1066 smi_msg->data[i+8] = msg->cmd;
1068 /* Now tack on the data to the message. */
1069 if (msg->data_len > 0)
1070 memcpy(&(smi_msg->data[i+9]), msg->data,
1072 smi_msg->data_size = msg->data_len + 9;
1074 /* Now calculate the checksum and tack it on. */
1075 smi_msg->data[i+smi_msg->data_size]
1076 = ipmb_checksum(&(smi_msg->data[i+6]),
1077 smi_msg->data_size-6);
1079 /* Add on the checksum size and the offset from the
1081 smi_msg->data_size += 1 + i;
1083 smi_msg->msgid = msgid;
1086 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1087 struct kernel_ipmi_msg *msg,
1088 struct ipmi_lan_addr *lan_addr,
1090 unsigned char ipmb_seq,
1091 unsigned char source_lun)
1093 /* Format the IPMB header data. */
1094 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
1095 smi_msg->data[1] = IPMI_SEND_MSG_CMD;
1096 smi_msg->data[2] = lan_addr->channel;
1097 smi_msg->data[3] = lan_addr->session_handle;
1098 smi_msg->data[4] = lan_addr->remote_SWID;
1099 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
1100 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
1101 smi_msg->data[7] = lan_addr->local_SWID;
1102 smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
1103 smi_msg->data[9] = msg->cmd;
1105 /* Now tack on the data to the message. */
1106 if (msg->data_len > 0)
1107 memcpy(&(smi_msg->data[10]), msg->data,
1109 smi_msg->data_size = msg->data_len + 10;
1111 /* Now calculate the checksum and tack it on. */
1112 smi_msg->data[smi_msg->data_size]
1113 = ipmb_checksum(&(smi_msg->data[7]),
1114 smi_msg->data_size-7);
1116 /* Add on the checksum size and the offset from the
1118 smi_msg->data_size += 1;
1120 smi_msg->msgid = msgid;
1123 /* Separate from ipmi_request so that the user does not have to be
1124 supplied in certain circumstances (mainly at panic time). If
1125 messages are supplied, they will be freed, even if an error
1127 static int i_ipmi_request(ipmi_user_t user,
1129 struct ipmi_addr *addr,
1131 struct kernel_ipmi_msg *msg,
1132 void *user_msg_data,
1134 struct ipmi_recv_msg *supplied_recv,
1136 unsigned char source_address,
1137 unsigned char source_lun,
1139 unsigned int retry_time_ms)
1142 struct ipmi_smi_msg *smi_msg;
1143 struct ipmi_recv_msg *recv_msg;
1144 unsigned long flags;
1147 if (supplied_recv) {
1148 recv_msg = supplied_recv;
1150 recv_msg = ipmi_alloc_recv_msg();
1151 if (recv_msg == NULL) {
1155 recv_msg->user_msg_data = user_msg_data;
1158 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1160 smi_msg = ipmi_alloc_smi_msg();
1161 if (smi_msg == NULL) {
1162 ipmi_free_recv_msg(recv_msg);
1167 recv_msg->user = user;
1169 kref_get(&user->refcount);
1170 recv_msg->msgid = msgid;
1171 /* Store the message to send in the receive message so timeout
1172 responses can get the proper response data. */
1173 recv_msg->msg = *msg;
1175 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1176 struct ipmi_system_interface_addr *smi_addr;
1178 if (msg->netfn & 1) {
1179 /* Responses are not allowed to the SMI. */
1184 smi_addr = (struct ipmi_system_interface_addr *) addr;
1185 if (smi_addr->lun > 3) {
1186 spin_lock_irqsave(&intf->counter_lock, flags);
1187 intf->sent_invalid_commands++;
1188 spin_unlock_irqrestore(&intf->counter_lock, flags);
1193 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1195 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1196 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1197 || (msg->cmd == IPMI_GET_MSG_CMD)
1198 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1200 /* We don't let the user do these, since we manage
1201 the sequence numbers. */
1202 spin_lock_irqsave(&intf->counter_lock, flags);
1203 intf->sent_invalid_commands++;
1204 spin_unlock_irqrestore(&intf->counter_lock, flags);
1209 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1210 spin_lock_irqsave(&intf->counter_lock, flags);
1211 intf->sent_invalid_commands++;
1212 spin_unlock_irqrestore(&intf->counter_lock, flags);
1217 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1218 smi_msg->data[1] = msg->cmd;
1219 smi_msg->msgid = msgid;
1220 smi_msg->user_data = recv_msg;
1221 if (msg->data_len > 0)
1222 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1223 smi_msg->data_size = msg->data_len + 2;
1224 spin_lock_irqsave(&intf->counter_lock, flags);
1225 intf->sent_local_commands++;
1226 spin_unlock_irqrestore(&intf->counter_lock, flags);
1227 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1228 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1230 struct ipmi_ipmb_addr *ipmb_addr;
1231 unsigned char ipmb_seq;
1235 if (addr->channel >= IPMI_MAX_CHANNELS) {
1236 spin_lock_irqsave(&intf->counter_lock, flags);
1237 intf->sent_invalid_commands++;
1238 spin_unlock_irqrestore(&intf->counter_lock, flags);
1243 if (intf->channels[addr->channel].medium
1244 != IPMI_CHANNEL_MEDIUM_IPMB)
1246 spin_lock_irqsave(&intf->counter_lock, flags);
1247 intf->sent_invalid_commands++;
1248 spin_unlock_irqrestore(&intf->counter_lock, flags);
1254 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1255 retries = 0; /* Don't retry broadcasts. */
1259 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1260 /* Broadcasts add a zero at the beginning of the
1261 message, but otherwise is the same as an IPMB
1263 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1268 /* Default to 1 second retries. */
1269 if (retry_time_ms == 0)
1270 retry_time_ms = 1000;
1272 /* 9 for the header and 1 for the checksum, plus
1273 possibly one for the broadcast. */
1274 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1275 spin_lock_irqsave(&intf->counter_lock, flags);
1276 intf->sent_invalid_commands++;
1277 spin_unlock_irqrestore(&intf->counter_lock, flags);
1282 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1283 if (ipmb_addr->lun > 3) {
1284 spin_lock_irqsave(&intf->counter_lock, flags);
1285 intf->sent_invalid_commands++;
1286 spin_unlock_irqrestore(&intf->counter_lock, flags);
1291 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1293 if (recv_msg->msg.netfn & 0x1) {
1294 /* It's a response, so use the user's sequence
1296 spin_lock_irqsave(&intf->counter_lock, flags);
1297 intf->sent_ipmb_responses++;
1298 spin_unlock_irqrestore(&intf->counter_lock, flags);
1299 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1301 source_address, source_lun);
1303 /* Save the receive message so we can use it
1304 to deliver the response. */
1305 smi_msg->user_data = recv_msg;
1307 /* It's a command, so get a sequence for it. */
1309 spin_lock_irqsave(&(intf->seq_lock), flags);
1311 spin_lock(&intf->counter_lock);
1312 intf->sent_ipmb_commands++;
1313 spin_unlock(&intf->counter_lock);
1315 /* Create a sequence number with a 1 second
1316 timeout and 4 retries. */
1317 rv = intf_next_seq(intf,
1325 /* We have used up all the sequence numbers,
1326 probably, so abort. */
1327 spin_unlock_irqrestore(&(intf->seq_lock),
1332 /* Store the sequence number in the message,
1333 so that when the send message response
1334 comes back we can start the timer. */
1335 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1336 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1337 ipmb_seq, broadcast,
1338 source_address, source_lun);
1340 /* Copy the message into the recv message data, so we
1341 can retransmit it later if necessary. */
1342 memcpy(recv_msg->msg_data, smi_msg->data,
1343 smi_msg->data_size);
1344 recv_msg->msg.data = recv_msg->msg_data;
1345 recv_msg->msg.data_len = smi_msg->data_size;
1347 /* We don't unlock until here, because we need
1348 to copy the completed message into the
1349 recv_msg before we release the lock.
1350 Otherwise, race conditions may bite us. I
1351 know that's pretty paranoid, but I prefer
1353 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1355 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1356 struct ipmi_lan_addr *lan_addr;
1357 unsigned char ipmb_seq;
1360 if (addr->channel >= IPMI_MAX_CHANNELS) {
1361 spin_lock_irqsave(&intf->counter_lock, flags);
1362 intf->sent_invalid_commands++;
1363 spin_unlock_irqrestore(&intf->counter_lock, flags);
1368 if ((intf->channels[addr->channel].medium
1369 != IPMI_CHANNEL_MEDIUM_8023LAN)
1370 && (intf->channels[addr->channel].medium
1371 != IPMI_CHANNEL_MEDIUM_ASYNC))
1373 spin_lock_irqsave(&intf->counter_lock, flags);
1374 intf->sent_invalid_commands++;
1375 spin_unlock_irqrestore(&intf->counter_lock, flags);
1382 /* Default to 1 second retries. */
1383 if (retry_time_ms == 0)
1384 retry_time_ms = 1000;
1386 /* 11 for the header and 1 for the checksum. */
1387 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1388 spin_lock_irqsave(&intf->counter_lock, flags);
1389 intf->sent_invalid_commands++;
1390 spin_unlock_irqrestore(&intf->counter_lock, flags);
1395 lan_addr = (struct ipmi_lan_addr *) addr;
1396 if (lan_addr->lun > 3) {
1397 spin_lock_irqsave(&intf->counter_lock, flags);
1398 intf->sent_invalid_commands++;
1399 spin_unlock_irqrestore(&intf->counter_lock, flags);
1404 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1406 if (recv_msg->msg.netfn & 0x1) {
1407 /* It's a response, so use the user's sequence
1409 spin_lock_irqsave(&intf->counter_lock, flags);
1410 intf->sent_lan_responses++;
1411 spin_unlock_irqrestore(&intf->counter_lock, flags);
1412 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1415 /* Save the receive message so we can use it
1416 to deliver the response. */
1417 smi_msg->user_data = recv_msg;
1419 /* It's a command, so get a sequence for it. */
1421 spin_lock_irqsave(&(intf->seq_lock), flags);
1423 spin_lock(&intf->counter_lock);
1424 intf->sent_lan_commands++;
1425 spin_unlock(&intf->counter_lock);
1427 /* Create a sequence number with a 1 second
1428 timeout and 4 retries. */
1429 rv = intf_next_seq(intf,
1437 /* We have used up all the sequence numbers,
1438 probably, so abort. */
1439 spin_unlock_irqrestore(&(intf->seq_lock),
1444 /* Store the sequence number in the message,
1445 so that when the send message response
1446 comes back we can start the timer. */
1447 format_lan_msg(smi_msg, msg, lan_addr,
1448 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1449 ipmb_seq, source_lun);
1451 /* Copy the message into the recv message data, so we
1452 can retransmit it later if necessary. */
1453 memcpy(recv_msg->msg_data, smi_msg->data,
1454 smi_msg->data_size);
1455 recv_msg->msg.data = recv_msg->msg_data;
1456 recv_msg->msg.data_len = smi_msg->data_size;
1458 /* We don't unlock until here, because we need
1459 to copy the completed message into the
1460 recv_msg before we release the lock.
1461 Otherwise, race conditions may bite us. I
1462 know that's pretty paranoid, but I prefer
1464 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1467 /* Unknown address type. */
1468 spin_lock_irqsave(&intf->counter_lock, flags);
1469 intf->sent_invalid_commands++;
1470 spin_unlock_irqrestore(&intf->counter_lock, flags);
1478 for (m = 0; m < smi_msg->data_size; m++)
1479 printk(" %2.2x", smi_msg->data[m]);
1483 intf->handlers->sender(intf->send_info, smi_msg, priority);
1488 ipmi_free_smi_msg(smi_msg);
1489 ipmi_free_recv_msg(recv_msg);
1493 static int check_addr(ipmi_smi_t intf,
1494 struct ipmi_addr *addr,
1495 unsigned char *saddr,
1498 if (addr->channel >= IPMI_MAX_CHANNELS)
1500 *lun = intf->channels[addr->channel].lun;
1501 *saddr = intf->channels[addr->channel].address;
/*
 * Public entry point: send an IPMI request with caller-specified
 * retry count and retry interval (milliseconds).  The address is
 * validated and resolved to a source address/LUN, then the request
 * is handed to i_ipmi_request().
 *
 * NOTE(review): several argument lines of the trailing
 * i_ipmi_request() call are not visible in this fragment -- verify
 * against the i_ipmi_request() definition.
 */
1505 int ipmi_request_settime(ipmi_user_t user,
1506 struct ipmi_addr *addr,
1508 struct kernel_ipmi_msg *msg,
1509 void *user_msg_data,
1512 unsigned int retry_time_ms)
1514 unsigned char saddr, lun;
/* Reject bad channels and pick up the per-channel address/LUN. */
1519 rv = check_addr(user->intf, addr, &saddr, &lun);
1522 return i_ipmi_request(user,
/*
 * Public entry point: send an IPMI request using caller-supplied
 * message buffers (supplied_recv and friends), so the send path does
 * not need to allocate.  Address validation mirrors
 * ipmi_request_settime().
 */
1536 int ipmi_request_supply_msgs(ipmi_user_t user,
1537 struct ipmi_addr *addr,
1539 struct kernel_ipmi_msg *msg,
1540 void *user_msg_data,
1542 struct ipmi_recv_msg *supplied_recv,
1545 unsigned char saddr, lun;
1550 rv = check_addr(user->intf, addr, &saddr, &lun);
1553 return i_ipmi_request(user,
/*
 * /proc read handler: print the IPMB slave address configured for
 * each channel, as space-separated hex values on a single line.
 */
1567 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1568 int count, int *eof, void *data)
1570 char *out = (char *) page;
1571 ipmi_smi_t intf = data;
1575 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1576 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1577 out[rv-1] = '\n'; /* Replace the final space with a newline */
/*
 * /proc read handler: print the BMC's IPMI specification version as
 * "major.minor".
 */
1583 static int version_file_read_proc(char *page, char **start, off_t off,
1584 int count, int *eof, void *data)
1586 char *out = (char *) page;
1587 ipmi_smi_t intf = data;
1589 return sprintf(out, "%d.%d\n",
1590 ipmi_version_major(&intf->bmc->id),
1591 ipmi_version_minor(&intf->bmc->id));
/*
 * /proc read handler: dump every message/error counter kept on the
 * interface, one "name: value" pair per line, and return the number
 * of bytes written.
 *
 * NOTE(review): the counters are read without taking
 * intf->counter_lock -- presumably a torn read is acceptable for
 * statistics output; confirm this is intentional.
 */
1594 static int stat_file_read_proc(char *page, char **start, off_t off,
1595 int count, int *eof, void *data)
1597 char *out = (char *) page;
1598 ipmi_smi_t intf = data;
1600 out += sprintf(out, "sent_invalid_commands: %d\n",
1601 intf->sent_invalid_commands);
1602 out += sprintf(out, "sent_local_commands: %d\n",
1603 intf->sent_local_commands);
1604 out += sprintf(out, "handled_local_responses: %d\n",
1605 intf->handled_local_responses);
1606 out += sprintf(out, "unhandled_local_responses: %d\n",
1607 intf->unhandled_local_responses);
1608 out += sprintf(out, "sent_ipmb_commands: %d\n",
1609 intf->sent_ipmb_commands);
1610 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1611 intf->sent_ipmb_command_errs);
1612 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1613 intf->retransmitted_ipmb_commands);
1614 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1615 intf->timed_out_ipmb_commands);
1616 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1617 intf->timed_out_ipmb_broadcasts);
1618 out += sprintf(out, "sent_ipmb_responses: %d\n",
1619 intf->sent_ipmb_responses);
1620 out += sprintf(out, "handled_ipmb_responses: %d\n",
1621 intf->handled_ipmb_responses);
1622 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1623 intf->invalid_ipmb_responses);
1624 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1625 intf->unhandled_ipmb_responses);
1626 out += sprintf(out, "sent_lan_commands: %d\n",
1627 intf->sent_lan_commands);
1628 out += sprintf(out, "sent_lan_command_errs: %d\n",
1629 intf->sent_lan_command_errs);
1630 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1631 intf->retransmitted_lan_commands);
1632 out += sprintf(out, "timed_out_lan_commands: %d\n",
1633 intf->timed_out_lan_commands);
1634 out += sprintf(out, "sent_lan_responses: %d\n",
1635 intf->sent_lan_responses);
1636 out += sprintf(out, "handled_lan_responses: %d\n",
1637 intf->handled_lan_responses);
1638 out += sprintf(out, "invalid_lan_responses: %d\n",
1639 intf->invalid_lan_responses);
1640 out += sprintf(out, "unhandled_lan_responses: %d\n",
1641 intf->unhandled_lan_responses);
1642 out += sprintf(out, "handled_commands: %d\n",
1643 intf->handled_commands);
1644 out += sprintf(out, "invalid_commands: %d\n",
1645 intf->invalid_commands);
1646 out += sprintf(out, "unhandled_commands: %d\n",
1647 intf->unhandled_commands);
1648 out += sprintf(out, "invalid_events: %d\n",
1649 intf->invalid_events);
1650 out += sprintf(out, "events: %d\n",
/* Length written = current position minus start of the page. */
1653 return (out - ((char *) page));
/*
 * Create a file under this interface's /proc directory and remember
 * it on smi->proc_entries so remove_proc_entries() can tear it down
 * later.  The name is copied so the caller's buffer need not outlive
 * the entry.  Compiled out entirely when /proc support is disabled.
 */
1656 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1657 read_proc_t *read_proc, write_proc_t *write_proc,
1658 void *data, struct module *owner)
1661 #ifdef CONFIG_PROC_FS
1662 struct proc_dir_entry *file;
1663 struct ipmi_proc_entry *entry;
1665 /* Create a list element. */
1666 entry = kmalloc(sizeof(*entry), GFP_KERNEL)
/* Take a private copy of the name. */
1669 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1674 strcpy(entry->name, name);
1676 file = create_proc_entry(name, 0, smi->proc_dir);
1684 file->read_proc = read_proc;
1685 file->write_proc = write_proc;
1686 file->owner = owner;
/* proc_entry_lock serializes list updates against removal. */
1688 spin_lock(&smi->proc_entry_lock);
1689 /* Stick it on the list. */
1690 entry->next = smi->proc_entries;
1691 smi->proc_entries = entry;
1692 spin_unlock(&smi->proc_entry_lock);
1694 #endif /* CONFIG_PROC_FS */
/*
 * Create the per-interface /proc directory (named after the interface
 * number) and populate it with the "stats", "ipmb" and "version"
 * files.
 */
1699 static int add_proc_entries(ipmi_smi_t smi, int num)
1703 #ifdef CONFIG_PROC_FS
1704 sprintf(smi->proc_dir_name, "%d", num);
1705 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1709 smi->proc_dir->owner = THIS_MODULE;
1713 rv = ipmi_smi_add_proc_entry(smi, "stats",
1714 stat_file_read_proc, NULL,
1718 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1719 ipmb_file_read_proc, NULL,
1723 rv = ipmi_smi_add_proc_entry(smi, "version",
1724 version_file_read_proc, NULL,
1726 #endif /* CONFIG_PROC_FS */
/*
 * Tear down everything ipmi_smi_add_proc_entry() created for this
 * interface, then remove the interface's own /proc directory.
 */
1731 static void remove_proc_entries(ipmi_smi_t smi)
1733 #ifdef CONFIG_PROC_FS
1734 struct ipmi_proc_entry *entry;
1736 spin_lock(&smi->proc_entry_lock);
/* Pop entries off the list one at a time and delete each file. */
1737 while (smi->proc_entries) {
1738 entry = smi->proc_entries;
1739 smi->proc_entries = entry->next;
1741 remove_proc_entry(entry->name, smi->proc_dir);
1745 spin_unlock(&smi->proc_entry_lock);
1746 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1747 #endif /* CONFIG_PROC_FS */
1750 static int __find_bmc_guid(struct device *dev, void *data)
1752 unsigned char *id = data;
1753 struct bmc_device *bmc = dev_get_drvdata(dev);
1754 return memcmp(bmc->guid, id, 16) == 0;
/*
 * Look up a registered bmc_device by its 16-byte GUID, using the
 * driver model's device iterator with __find_bmc_guid as the match
 * function.  NOTE(review): the no-match (dev == NULL) path is not
 * visible in this fragment -- verify it returns NULL rather than
 * dereferencing.
 */
1757 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1758 unsigned char *guid)
1762 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1764 return dev_get_drvdata(dev);
/* Match key for finding an existing BMC by product id + device id. */
1769 struct prod_dev_id {
1770 unsigned int product_id;
1771 unsigned char device_id;
1774 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1776 struct prod_dev_id *id = data;
1777 struct bmc_device *bmc = dev_get_drvdata(dev);
1779 return (bmc->id.product_id == id->product_id
1780 && bmc->id.product_id == id->product_id
1781 && bmc->id.device_id == id->device_id);
/*
 * Look up a registered bmc_device by its (product_id, device_id)
 * pair via driver_find_device()/__find_bmc_prod_dev_id.
 */
1784 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1785 struct device_driver *drv,
1786 unsigned char product_id, unsigned char device_id)
1788 struct prod_dev_id id = {
1789 .product_id = product_id,
1790 .device_id = device_id,
1794 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1796 return dev_get_drvdata(dev);
/* sysfs show: the BMC's device id byte, in decimal. */
1801 static ssize_t device_id_show(struct device *dev,
1802 struct device_attribute *attr,
1805 struct bmc_device *bmc = dev_get_drvdata(dev);
1807 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1810 static ssize_t provides_dev_sdrs_show(struct device *dev,
1811 struct device_attribute *attr,
1814 struct bmc_device *bmc = dev_get_drvdata(dev);
1816 return snprintf(buf, 10, "%u\n",
1817 bmc->id.device_revision && 0x80 >> 7);
1820 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1823 struct bmc_device *bmc = dev_get_drvdata(dev);
1825 return snprintf(buf, 20, "%u\n",
1826 bmc->id.device_revision && 0x0F);
/* sysfs show: firmware revision, major in decimal, minor in hex. */
1829 static ssize_t firmware_rev_show(struct device *dev,
1830 struct device_attribute *attr,
1833 struct bmc_device *bmc = dev_get_drvdata(dev);
1835 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1836 bmc->id.firmware_revision_2);
/* sysfs show: supported IPMI specification version, "major.minor". */
1839 static ssize_t ipmi_version_show(struct device *dev,
1840 struct device_attribute *attr,
1843 struct bmc_device *bmc = dev_get_drvdata(dev);
1845 return snprintf(buf, 20, "%u.%u\n",
1846 ipmi_version_major(&bmc->id),
1847 ipmi_version_minor(&bmc->id));
/* sysfs show: the additional-device-support byte, as two hex digits. */
1850 static ssize_t add_dev_support_show(struct device *dev,
1851 struct device_attribute *attr,
1854 struct bmc_device *bmc = dev_get_drvdata(dev);
1856 return snprintf(buf, 10, "0x%02x\n",
1857 bmc->id.additional_device_support);
/* sysfs show: the manufacturer id as six zero-padded hex digits. */
1860 static ssize_t manufacturer_id_show(struct device *dev,
1861 struct device_attribute *attr,
1864 struct bmc_device *bmc = dev_get_drvdata(dev);
1866 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
/* sysfs show: the product id as four zero-padded hex digits. */
1869 static ssize_t product_id_show(struct device *dev,
1870 struct device_attribute *attr,
1873 struct bmc_device *bmc = dev_get_drvdata(dev);
1875 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
/*
 * sysfs show: the four auxiliary firmware revision bytes, printed
 * from byte 3 down to byte 0.
 */
1878 static ssize_t aux_firmware_rev_show(struct device *dev,
1879 struct device_attribute *attr,
1882 struct bmc_device *bmc = dev_get_drvdata(dev);
1884 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1885 bmc->id.aux_firmware_revision[3],
1886 bmc->id.aux_firmware_revision[2],
1887 bmc->id.aux_firmware_revision[1],
1888 bmc->id.aux_firmware_revision[0]);
/*
 * sysfs show: the BMC GUID.
 * NOTE(review): only guid[0] and guid[8] -- two single bytes of the
 * 16-byte GUID -- are printed.  This looks like it was meant to print
 * two 64-bit halves of the GUID; verify against the upstream fix.
 */
1891 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1894 struct bmc_device *bmc = dev_get_drvdata(dev);
1896 return snprintf(buf, 100, "%Lx%Lx\n",
1897 (long long) bmc->guid[0],
1898 (long long) bmc->guid[8]);
/*
 * kref release callback for a bmc_device: remove every sysfs
 * attribute file that ipmi_bmc_register() created (the aux firmware
 * revision file only if it was created), then unregister the
 * platform device.  Invoked when the last reference is dropped via
 * kref_put().
 */
1902 cleanup_bmc_device(struct kref *ref)
1904 struct bmc_device *bmc;
1906 bmc = container_of(ref, struct bmc_device, refcount);
1908 device_remove_file(&bmc->dev->dev,
1909 &bmc->device_id_attr);
1910 device_remove_file(&bmc->dev->dev,
1911 &bmc->provides_dev_sdrs_attr);
1912 device_remove_file(&bmc->dev->dev,
1913 &bmc->revision_attr);
1914 device_remove_file(&bmc->dev->dev,
1915 &bmc->firmware_rev_attr);
1916 device_remove_file(&bmc->dev->dev,
1917 &bmc->version_attr);
1918 device_remove_file(&bmc->dev->dev,
1919 &bmc->add_dev_support_attr);
1920 device_remove_file(&bmc->dev->dev,
1921 &bmc->manufacturer_id_attr);
1922 device_remove_file(&bmc->dev->dev,
1923 &bmc->product_id_attr);
/* Only created when the BMC reported an aux firmware revision. */
1924 if (bmc->id.aux_firmware_revision_set)
1925 device_remove_file(&bmc->dev->dev,
1926 &bmc->aux_firmware_rev_attr);
1928 device_remove_file(&bmc->dev->dev,
1930 platform_device_unregister(bmc->dev);
/*
 * Detach an interface from its BMC: remove the sysfs links in both
 * directions, free the interface's link name, then drop the
 * interface's reference on the bmc_device (which may trigger
 * cleanup_bmc_device()).  ipmidriver_mutex serializes the refcount
 * drop against ipmi_bmc_register().
 */
1934 static void ipmi_bmc_unregister(ipmi_smi_t intf)
1936 struct bmc_device *bmc = intf->bmc;
1938 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1939 if (intf->my_dev_name) {
1940 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1941 kfree(intf->my_dev_name);
1942 intf->my_dev_name = NULL;
1945 mutex_lock(&ipmidriver_mutex);
1946 kref_put(&bmc->refcount, cleanup_bmc_device);
1947 mutex_unlock(&ipmidriver_mutex);
/*
 * Bind an interface to a bmc_device.  If a BMC with the same GUID or
 * the same (product id, device id) pair is already registered, reuse
 * it and just take a reference; otherwise allocate and register a new
 * platform device, create its sysfs attribute files, and create the
 * "bmc"/"ipmiN" symlinks between the interface's device and the BMC
 * device.
 *
 * NOTE(review): this fragment is heavily elided -- several error
 * paths and the kfree of the unused new bmc are not visible.
 */
1950 static int ipmi_bmc_register(ipmi_smi_t intf)
1953 struct bmc_device *bmc = intf->bmc;
1954 struct bmc_device *old_bmc;
1958 mutex_lock(&ipmidriver_mutex);
1961 * Try to find if there is an bmc_device struct
1962 * representing the interfaced BMC already
1965 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
1967 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
1972 * If there is already an bmc_device, free the new one,
1973 * otherwise register the new BMC device
1977 intf->bmc = old_bmc;
1980 kref_get(&bmc->refcount);
1981 mutex_unlock(&ipmidriver_mutex);
1984 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
1985 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
1986 bmc->id.manufacturer_id,
/* No existing BMC matched: register a fresh platform device. */
1990 bmc->dev = platform_device_alloc("ipmi_bmc",
1995 " Unable to allocate platform device\n");
1998 bmc->dev->dev.driver = &ipmidriver;
1999 dev_set_drvdata(&bmc->dev->dev, bmc);
2000 kref_init(&bmc->refcount);
2002 rv = platform_device_register(bmc->dev);
2003 mutex_unlock(&ipmidriver_mutex);
2007 " Unable to register bmc device: %d\n",
2009 /* Don't go to out_err, you can only do that if
2010 the device is registered already. */
/* Describe every sysfs attribute, then create the files below. */
2014 bmc->device_id_attr.attr.name = "device_id";
2015 bmc->device_id_attr.attr.owner = THIS_MODULE;
2016 bmc->device_id_attr.attr.mode = S_IRUGO;
2017 bmc->device_id_attr.show = device_id_show;
2019 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2020 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2021 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2022 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2025 bmc->revision_attr.attr.name = "revision";
2026 bmc->revision_attr.attr.owner = THIS_MODULE;
2027 bmc->revision_attr.attr.mode = S_IRUGO;
2028 bmc->revision_attr.show = revision_show;
2030 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2031 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2032 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2033 bmc->firmware_rev_attr.show = firmware_rev_show;
2035 bmc->version_attr.attr.name = "ipmi_version";
2036 bmc->version_attr.attr.owner = THIS_MODULE;
2037 bmc->version_attr.attr.mode = S_IRUGO;
2038 bmc->version_attr.show = ipmi_version_show;
2040 bmc->add_dev_support_attr.attr.name
2041 = "additional_device_support";
2042 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2043 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2044 bmc->add_dev_support_attr.show = add_dev_support_show;
2046 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2047 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2048 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2049 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2051 bmc->product_id_attr.attr.name = "product_id";
2052 bmc->product_id_attr.attr.owner = THIS_MODULE;
2053 bmc->product_id_attr.attr.mode = S_IRUGO;
2054 bmc->product_id_attr.show = product_id_show;
2056 bmc->guid_attr.attr.name = "guid";
2057 bmc->guid_attr.attr.owner = THIS_MODULE;
2058 bmc->guid_attr.attr.mode = S_IRUGO;
2059 bmc->guid_attr.show = guid_show;
2061 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2062 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2063 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2064 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2066 device_create_file(&bmc->dev->dev,
2067 &bmc->device_id_attr);
2068 device_create_file(&bmc->dev->dev,
2069 &bmc->provides_dev_sdrs_attr);
2070 device_create_file(&bmc->dev->dev,
2071 &bmc->revision_attr);
2072 device_create_file(&bmc->dev->dev,
2073 &bmc->firmware_rev_attr);
2074 device_create_file(&bmc->dev->dev,
2075 &bmc->version_attr);
2076 device_create_file(&bmc->dev->dev,
2077 &bmc->add_dev_support_attr);
2078 device_create_file(&bmc->dev->dev,
2079 &bmc->manufacturer_id_attr);
2080 device_create_file(&bmc->dev->dev,
2081 &bmc->product_id_attr);
2082 if (bmc->id.aux_firmware_revision_set)
2083 device_create_file(&bmc->dev->dev,
2084 &bmc->aux_firmware_rev_attr);
2086 device_create_file(&bmc->dev->dev,
2090 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2091 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2092 bmc->id.manufacturer_id,
2098 * create symlink from system interface device to bmc device
2101 rv = sysfs_create_link(&intf->si_dev->kobj,
2102 &bmc->dev->dev.kobj, "bmc");
2105 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
/* Size the "ipmiN" name first, then allocate and format it. */
2110 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2111 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2112 if (!intf->my_dev_name) {
2115 "ipmi_msghandler: allocate link from BMC: %d\n",
2119 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2121 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2124 kfree(intf->my_dev_name);
2125 intf->my_dev_name = NULL;
2128 " Unable to create symlink to bmc: %d\n",
2136 ipmi_bmc_unregister(intf);
/*
 * Issue a Get Device GUID command to the BMC over the system
 * interface.  The response is delivered through the null-user
 * handler (guid_handler).
 */
2141 send_guid_cmd(ipmi_smi_t intf, int chan)
2143 struct kernel_ipmi_msg msg;
2144 struct ipmi_system_interface_addr si;
2146 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2147 si.channel = IPMI_BMC_CHANNEL;
2150 msg.netfn = IPMI_NETFN_APP_REQUEST;
2151 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2154 return i_ipmi_request(NULL,
2156 (struct ipmi_addr *) &si,
2163 intf->channels[0].address,
2164 intf->channels[0].lun,
/*
 * Null-user handler for the Get Device GUID response.  Ignores any
 * message that is not that response; otherwise records the GUID (or
 * marks it unavailable on error or short response) and wakes up the
 * waiter in get_guid().
 */
2169 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2171 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2172 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2173 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2177 if (msg->msg.data[0] != 0) {
2178 /* Error from getting the GUID, the BMC doesn't have one. */
2179 intf->bmc->guid_set = 0;
/* 17 = completion code byte + 16 GUID bytes. */
2183 if (msg->msg.data_len < 17) {
2184 intf->bmc->guid_set = 0;
2185 printk(KERN_WARNING PFX
2186 "guid_handler: The GUID response from the BMC was too"
2187 " short, it was %d but should have been 17. Assuming"
2188 " GUID is not available.\n",
/* NOTE(review): data[0] is the completion code, so the GUID
 * presumably starts at data[1]; copying 16 bytes from data[0]
 * looks off by one -- verify against the later upstream fix. */
2193 memcpy(intf->bmc->guid, msg->msg.data, 16);
2194 intf->bmc->guid_set = 1;
2196 wake_up(&intf->waitq);
/*
 * Synchronously fetch the BMC's GUID: install guid_handler as the
 * null-user handler, send the command, and sleep until guid_set
 * leaves its "in progress" value (2).
 */
2200 get_guid(ipmi_smi_t intf)
2204 intf->bmc->guid_set = 0x2;
2205 intf->null_user_handler = guid_handler;
2206 rv = send_guid_cmd(intf, 0);
2208 /* Send failed, no GUID available. */
2209 intf->bmc->guid_set = 0;
2210 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2211 intf->null_user_handler = NULL;
/*
 * Issue a Get Channel Info command for channel @chan over the system
 * interface.  Responses arrive through the null-user handler
 * (channel_handler) during interface bring-up.
 */
2215 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2217 struct kernel_ipmi_msg msg;
2218 unsigned char data[1];
2219 struct ipmi_system_interface_addr si;
2221 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2222 si.channel = IPMI_BMC_CHANNEL;
2225 msg.netfn = IPMI_NETFN_APP_REQUEST;
2226 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2230 return i_ipmi_request(NULL,
2232 (struct ipmi_addr *) &si,
2239 intf->channels[0].address,
2240 intf->channels[0].lun,
/*
 * Null-user handler for Get Channel Info responses during interface
 * bring-up.  Records the medium/protocol of the current channel and
 * then requests the next one; when the scan finishes (or fails) it
 * sets curr_channel to IPMI_MAX_CHANNELS and wakes the waiter in
 * ipmi_register_smi().
 */
2245 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2250 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2251 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2252 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2254 /* It's the one we want */
2255 if (msg->msg.data[0] != 0) {
2256 /* Got an error from the channel, just go on. */
2258 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2259 /* If the MC does not support this
2260 command, that is legal. We just
2261 assume it has one IPMB at channel
2263 intf->channels[0].medium
2264 = IPMI_CHANNEL_MEDIUM_IPMB;
2265 intf->channels[0].protocol
2266 = IPMI_CHANNEL_PROTOCOL_IPMB;
/* End the scan and release the waiter. */
2269 intf->curr_channel = IPMI_MAX_CHANNELS;
2270 wake_up(&intf->waitq);
2275 if (msg->msg.data_len < 4) {
2276 /* Message not big enough, just go on. */
2279 chan = intf->curr_channel;
2280 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2281 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
/* Advance to the next channel, or finish the scan. */
2284 intf->curr_channel++;
2285 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2286 wake_up(&intf->waitq);
2288 rv = send_channel_info_cmd(intf, intf->curr_channel);
2291 /* Got an error somehow, just give up. */
2292 intf->curr_channel = IPMI_MAX_CHANNELS;
2293 wake_up(&intf->waitq);
2295 printk(KERN_WARNING PFX
2296 "Error sending channel information: %d\n",
/*
 * Register a new low-level (SMI) interface with the message handler:
 * allocate and initialize the per-interface structure, reserve a slot
 * in ipmi_interfaces[], start the lower layer, scan the channels (on
 * IPMI >= 1.5) or assume a single IPMB channel at zero, create the
 * /proc entries and the BMC device, then publish the interface and
 * notify the SMI watchers.
 *
 * NOTE(review): heavily elided fragment -- the error-handling paths
 * between steps are not visible.
 */
2304 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2306 struct ipmi_device_id *device_id,
2307 struct device *si_dev,
2308 unsigned char slave_addr)
2313 unsigned long flags;
2317 version_major = ipmi_version_major(device_id);
2318 version_minor = ipmi_version_minor(device_id);
2320 /* Make sure the driver is actually initialized, this handles
2321 problems with initialization order. */
2323 rv = ipmi_init_msghandler();
2326 /* The init code doesn't return an error if it was turned
2327 off, but it won't initialize. Check that. */
2332 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
2335 memset(intf, 0, sizeof(*intf));
2336 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2341 intf->intf_num = -1;
2342 kref_init(&intf->refcount);
2343 intf->bmc->id = *device_id;
2344 intf->si_dev = si_dev;
/* Default every channel to the standard BMC slave address. */
2345 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2346 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2347 intf->channels[j].lun = 2;
2349 if (slave_addr != 0)
2350 intf->channels[0].address = slave_addr;
2351 INIT_LIST_HEAD(&intf->users);
2352 intf->handlers = handlers;
2353 intf->send_info = send_info;
2354 spin_lock_init(&intf->seq_lock);
2355 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2356 intf->seq_table[j].inuse = 0;
2357 intf->seq_table[j].seqid = 0;
2360 #ifdef CONFIG_PROC_FS
2361 spin_lock_init(&intf->proc_entry_lock);
2363 spin_lock_init(&intf->waiting_msgs_lock);
2364 INIT_LIST_HEAD(&intf->waiting_msgs);
2365 spin_lock_init(&intf->events_lock);
2366 INIT_LIST_HEAD(&intf->waiting_events);
2367 intf->waiting_events_count = 0;
2368 init_MUTEX(&intf->cmd_rcvrs_lock);
2369 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2370 init_waitqueue_head(&intf->waitq);
2372 spin_lock_init(&intf->counter_lock);
2373 intf->proc_dir = NULL;
/* Grab a free slot in the global interface table. */
2376 spin_lock_irqsave(&interfaces_lock, flags);
2377 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2378 if (ipmi_interfaces[i] == NULL) {
2380 /* Reserve the entry till we are done. */
2381 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2386 spin_unlock_irqrestore(&interfaces_lock, flags);
2390 rv = handlers->start_processing(send_info, intf);
2396 if ((version_major > 1)
2397 || ((version_major == 1) && (version_minor >= 5)))
2399 /* Start scanning the channels to see what is
2401 intf->null_user_handler = channel_handler;
2402 intf->curr_channel = 0;
2403 rv = send_channel_info_cmd(intf, 0);
2407 /* Wait for the channel info to be read. */
2408 wait_event(intf->waitq,
2409 intf->curr_channel >= IPMI_MAX_CHANNELS);
2410 intf->null_user_handler = NULL;
2412 /* Assume a single IPMB channel at zero. */
2413 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2414 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2418 rv = add_proc_entries(intf, i);
2420 rv = ipmi_bmc_register(intf);
/* Failure: undo everything and release the reserved slot. */
2425 remove_proc_entries(intf);
2426 kref_put(&intf->refcount, intf_free);
2427 if (i < MAX_IPMI_INTERFACES) {
2428 spin_lock_irqsave(&interfaces_lock, flags);
2429 ipmi_interfaces[i] = NULL;
2430 spin_unlock_irqrestore(&interfaces_lock, flags);
/* Success: publish the interface and tell the watchers. */
2433 spin_lock_irqsave(&interfaces_lock, flags);
2434 ipmi_interfaces[i] = intf;
2435 spin_unlock_irqrestore(&interfaces_lock, flags);
2436 call_smi_watchers(i, intf->si_dev);
/*
 * Unregister a low-level (SMI) interface: detach its BMC, reserve
 * its table slot while tearing down the /proc entries, notify every
 * SMI watcher, then free the slot and drop the final reference.
 */
2442 int ipmi_unregister_smi(ipmi_smi_t intf)
2445 struct ipmi_smi_watcher *w;
2446 unsigned long flags;
2448 ipmi_bmc_unregister(intf);
2450 spin_lock_irqsave(&interfaces_lock, flags);
2451 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2452 if (ipmi_interfaces[i] == intf) {
2453 /* Set the interface number reserved until we
2455 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
2456 intf->intf_num = -1;
2460 spin_unlock_irqrestore(&interfaces_lock,flags);
/* Loop fell through: this interface was never registered. */
2462 if (i == MAX_IPMI_INTERFACES)
2465 remove_proc_entries(intf);
2467 /* Call all the watcher interfaces to tell them that
2468 an interface is gone. */
2469 down_read(&smi_watchers_sem);
2470 list_for_each_entry(w, &smi_watchers, link)
2472 up_read(&smi_watchers_sem);
2474 /* Allow the entry to be reused now. */
2475 spin_lock_irqsave(&interfaces_lock, flags);
2476 ipmi_interfaces[i] = NULL;
2477 spin_unlock_irqrestore(&interfaces_lock,flags);
2479 kref_put(&intf->refcount, intf_free);
/*
 * Handle a response to an IPMB command we sent earlier: rebuild the
 * remote IPMB address from the Get Message payload, look up the
 * matching sequence-table entry, copy the payload into the waiting
 * recv_msg and deliver it to the user.
 */
2483 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2484 struct ipmi_smi_msg *msg)
2486 struct ipmi_ipmb_addr ipmb_addr;
2487 struct ipmi_recv_msg *recv_msg;
2488 unsigned long flags;
2491 /* This is 11, not 10, because the response must contain a
2492 * completion code. */
2493 if (msg->rsp_size < 11) {
2494 /* Message not big enough, just ignore it. */
2495 spin_lock_irqsave(&intf->counter_lock, flags);
2496 intf->invalid_ipmb_responses++;
2497 spin_unlock_irqrestore(&intf->counter_lock, flags);
2501 if (msg->rsp[2] != 0) {
2502 /* An error getting the response, just ignore it. */
/* Reconstruct the responder's address from the payload bytes. */
2506 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2507 ipmb_addr.slave_addr = msg->rsp[6];
2508 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2509 ipmb_addr.lun = msg->rsp[7] & 3;
2511 /* It's a response from a remote entity. Look up the sequence
2512 number and handle the response. */
2513 if (intf_find_seq(intf,
2517 (msg->rsp[4] >> 2) & (~1),
2518 (struct ipmi_addr *) &(ipmb_addr),
2521 /* We were unable to find the sequence number,
2522 so just nuke the message. */
2523 spin_lock_irqsave(&intf->counter_lock, flags);
2524 intf->unhandled_ipmb_responses++;
2525 spin_unlock_irqrestore(&intf->counter_lock, flags);
2529 memcpy(recv_msg->msg_data,
2532 /* The other fields matched, so no need to set them, except
2533 for netfn, which needs to be the response that was
2534 returned, not the request value. */
2535 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2536 recv_msg->msg.data = recv_msg->msg_data;
2537 recv_msg->msg.data_len = msg->rsp_size - 10;
2538 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2539 spin_lock_irqsave(&intf->counter_lock, flags);
2540 intf->handled_ipmb_responses++;
2541 spin_unlock_irqrestore(&intf->counter_lock, flags);
2542 deliver_response(recv_msg);
/*
 * Handle an incoming IPMB command.  If a user has registered a
 * receiver for the (netfn, cmd) pair, build a recv_msg and deliver
 * it; otherwise construct an "invalid command" error response and
 * send it back over IPMB.  Returns -1 when the msg buffer was handed
 * to the sender (so the caller must not free or requeue it).
 */
2547 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2548 struct ipmi_smi_msg *msg)
2550 struct cmd_rcvr *rcvr;
2552 unsigned char netfn;
2554 ipmi_user_t user = NULL;
2555 struct ipmi_ipmb_addr *ipmb_addr;
2556 struct ipmi_recv_msg *recv_msg;
2557 unsigned long flags;
2559 if (msg->rsp_size < 10) {
2560 /* Message not big enough, just ignore it. */
2561 spin_lock_irqsave(&intf->counter_lock, flags);
2562 intf->invalid_commands++;
2563 spin_unlock_irqrestore(&intf->counter_lock, flags);
2567 if (msg->rsp[2] != 0) {
2568 /* An error getting the response, just ignore it. */
2572 netfn = msg->rsp[4] >> 2;
2576 rcvr = find_cmd_rcvr(intf, netfn, cmd);
/* Hold the user across delivery. */
2579 kref_get(&user->refcount);
2585 /* We didn't find a user, deliver an error response. */
2586 spin_lock_irqsave(&intf->counter_lock, flags);
2587 intf->unhandled_commands++;
2588 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build a Send Message wrapper around an IPMB error reply. */
2590 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2591 msg->data[1] = IPMI_SEND_MSG_CMD;
2592 msg->data[2] = msg->rsp[3];
2593 msg->data[3] = msg->rsp[6];
2594 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2595 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2596 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2598 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2599 msg->data[8] = msg->rsp[8]; /* cmd */
2600 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2601 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2602 msg->data_size = 11;
2607 printk("Invalid command:");
2608 for (m = 0; m < msg->data_size; m++)
2609 printk(" %2.2x", msg->data[m]);
2613 intf->handlers->sender(intf->send_info, msg, 0);
2615 rv = -1; /* We used the message, so return the value that
2616 causes it to not be freed or queued. */
2618 /* Deliver the message to the user. */
2619 spin_lock_irqsave(&intf->counter_lock, flags);
2620 intf->handled_commands++;
2621 spin_unlock_irqrestore(&intf->counter_lock, flags);
2623 recv_msg = ipmi_alloc_recv_msg();
2625 /* We couldn't allocate memory for the
2626 message, so requeue it for handling
2629 kref_put(&user->refcount, free_user);
2631 /* Extract the source address from the data. */
2632 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2633 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2634 ipmb_addr->slave_addr = msg->rsp[6];
2635 ipmb_addr->lun = msg->rsp[7] & 3;
2636 ipmb_addr->channel = msg->rsp[3] & 0xf;
2638 /* Extract the rest of the message information
2639 from the IPMB header.*/
2640 recv_msg->user = user;
2641 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2642 recv_msg->msgid = msg->rsp[7] >> 2;
2643 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2644 recv_msg->msg.cmd = msg->rsp[8];
2645 recv_msg->msg.data = recv_msg->msg_data;
2647 /* We chop off 10, not 9 bytes because the checksum
2648 at the end also needs to be removed. */
2649 recv_msg->msg.data_len = msg->rsp_size - 10;
2650 memcpy(recv_msg->msg_data,
2652 msg->rsp_size - 10);
2653 deliver_response(recv_msg);
/*
 * Handle a response to a LAN command we sent earlier: rebuild the
 * remote LAN address from the Get Message payload, look up the
 * matching sequence-table entry, copy the payload into the waiting
 * recv_msg and deliver it to the user.
 */
2660 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2661 struct ipmi_smi_msg *msg)
2663 struct ipmi_lan_addr lan_addr;
2664 struct ipmi_recv_msg *recv_msg;
2665 unsigned long flags;
2668 /* This is 13, not 12, because the response must contain a
2669 * completion code. */
2670 if (msg->rsp_size < 13) {
2671 /* Message not big enough, just ignore it. */
2672 spin_lock_irqsave(&intf->counter_lock, flags);
2673 intf->invalid_lan_responses++;
2674 spin_unlock_irqrestore(&intf->counter_lock, flags);
2678 if (msg->rsp[2] != 0) {
2679 /* An error getting the response, just ignore it. */
/* Reconstruct the responder's LAN address from the payload. */
2683 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
2684 lan_addr.session_handle = msg->rsp[4];
2685 lan_addr.remote_SWID = msg->rsp[8];
2686 lan_addr.local_SWID = msg->rsp[5];
2687 lan_addr.channel = msg->rsp[3] & 0x0f;
2688 lan_addr.privilege = msg->rsp[3] >> 4;
2689 lan_addr.lun = msg->rsp[9] & 3;
2691 /* It's a response from a remote entity. Look up the sequence
2692 number and handle the response. */
2693 if (intf_find_seq(intf,
2697 (msg->rsp[6] >> 2) & (~1),
2698 (struct ipmi_addr *) &(lan_addr),
2701 /* We were unable to find the sequence number,
2702 so just nuke the message. */
2703 spin_lock_irqsave(&intf->counter_lock, flags);
2704 intf->unhandled_lan_responses++;
2705 spin_unlock_irqrestore(&intf->counter_lock, flags);
2709 memcpy(recv_msg->msg_data,
2711 msg->rsp_size - 11);
2712 /* The other fields matched, so no need to set them, except
2713 for netfn, which needs to be the response that was
2714 returned, not the request value. */
2715 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2716 recv_msg->msg.data = recv_msg->msg_data;
2717 recv_msg->msg.data_len = msg->rsp_size - 12;
2718 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2719 spin_lock_irqsave(&intf->counter_lock, flags);
2720 intf->handled_lan_responses++;
2721 spin_unlock_irqrestore(&intf->counter_lock, flags);
2722 deliver_response(recv_msg);
/*
 * Process an incoming command (not a response) received over a LAN
 * channel: find the user registered for this netfn/cmd pair, build an
 * ipmi_recv_msg describing the command and its LAN source address,
 * and deliver it. Counters track invalid/unhandled/handled commands.
 *
 * NOTE(review): listing is elided (returns, some conditionals and the
 * rcu/lookup plumbing are missing); comments cover visible lines only.
 */
2727 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2728 struct ipmi_smi_msg *msg)
2730 struct cmd_rcvr *rcvr;
2732 unsigned char netfn;
2734 ipmi_user_t user = NULL;
2735 struct ipmi_lan_addr *lan_addr;
2736 struct ipmi_recv_msg *recv_msg;
2737 unsigned long flags;
2739 if (msg->rsp_size < 12) {
2740 /* Message not big enough, just ignore it. */
2741 spin_lock_irqsave(&intf->counter_lock, flags);
2742 intf->invalid_commands++;
2743 spin_unlock_irqrestore(&intf->counter_lock, flags);
2747 if (msg->rsp[2] != 0) {
2748 /* An error getting the response, just ignore it. */
2752 netfn = msg->rsp[6] >> 2;
/* Look up whoever registered for this netfn/cmd and pin the user with
   a refcount while we deliver to it. */
2756 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2759 kref_get(&user->refcount);
2765 /* We didn't find a user, just give up. */
2766 spin_lock_irqsave(&intf->counter_lock, flags);
2767 intf->unhandled_commands++;
2768 spin_unlock_irqrestore(&intf->counter_lock, flags);
2770 rv = 0; /* Don't do anything with these messages, just
2771 allow them to be freed. */
2773 /* Deliver the message to the user. */
2774 spin_lock_irqsave(&intf->counter_lock, flags);
2775 intf->handled_commands++;
2776 spin_unlock_irqrestore(&intf->counter_lock, flags);
2778 recv_msg = ipmi_alloc_recv_msg();
2780 /* We couldn't allocate memory for the
2781 message, so requeue it for handling
/* Allocation-failure path drops the user reference taken above. */
2784 kref_put(&user->refcount, free_user);
2786 /* Extract the source address from the data. */
2787 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
2788 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
2789 lan_addr->session_handle = msg->rsp[4];
2790 lan_addr->remote_SWID = msg->rsp[8];
2791 lan_addr->local_SWID = msg->rsp[5];
2792 lan_addr->lun = msg->rsp[9] & 3;
2793 lan_addr->channel = msg->rsp[3] & 0xf;
2794 lan_addr->privilege = msg->rsp[3] >> 4;
2796 /* Extract the rest of the message information
2797 from the IPMB header.*/
2798 recv_msg->user = user;
2799 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2800 recv_msg->msgid = msg->rsp[9] >> 2;
2801 recv_msg->msg.netfn = msg->rsp[6] >> 2;
2802 recv_msg->msg.cmd = msg->rsp[10];
2803 recv_msg->msg.data = recv_msg->msg_data;
2805 /* We chop off 12, not 11 bytes because the checksum
2806 at the end also needs to be removed. */
2807 recv_msg->msg.data_len = msg->rsp_size - 12;
2808 memcpy(recv_msg->msg_data,
2810 msg->rsp_size - 12);
2811 deliver_response(recv_msg);
/*
 * Fill a receive message with an asynchronous event read from the
 * BMC's event buffer. The source address is always the system
 * interface (BMC channel); the event payload starts at rsp[3]
 * (rsp[0]/rsp[1] are netfn/cmd, rsp[2] is the completion code).
 */
2818 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2819 struct ipmi_smi_msg *msg)
2821 struct ipmi_system_interface_addr *smi_addr;
2823 recv_msg->msgid = 0;
2824 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2825 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2826 smi_addr->channel = IPMI_BMC_CHANNEL;
2827 smi_addr->lun = msg->rsp[0] & 3;
2828 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2829 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2830 recv_msg->msg.cmd = msg->rsp[1];
2831 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2832 recv_msg->msg.data = recv_msg->msg_data;
2833 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle the response to a Read Event Message Buffer command: fan the
 * event out to every user that has asked for events (gets_events),
 * or, if no one wants it, park it on intf->waiting_events (bounded by
 * MAX_EVENTS_IN_QUEUE) for a future consumer.
 *
 * NOTE(review): listing is elided (returns, the requeue path and some
 * loop bodies are missing); comments cover visible lines only.
 */
2836 static int handle_read_event_rsp(ipmi_smi_t intf,
2837 struct ipmi_smi_msg *msg)
2839 struct ipmi_recv_msg *recv_msg, *recv_msg2;
2840 struct list_head msgs;
2843 int deliver_count = 0;
2844 unsigned long flags;
2846 if (msg->rsp_size < 19) {
2847 /* Message is too small to be an IPMB event. */
2848 spin_lock_irqsave(&intf->counter_lock, flags);
2849 intf->invalid_events++;
2850 spin_unlock_irqrestore(&intf->counter_lock, flags);
2854 if (msg->rsp[2] != 0) {
2855 /* An error getting the event, just ignore it. */
2859 INIT_LIST_HEAD(&msgs);
/* events_lock serializes event distribution and the waiting queue. */
2861 spin_lock_irqsave(&intf->events_lock, flags);
2863 spin_lock(&intf->counter_lock);
2865 spin_unlock(&intf->counter_lock);
2867 /* Allocate and fill in one message for every user that is getting
2870 list_for_each_entry_rcu(user, &intf->users, link) {
2871 if (!user->gets_events)
2874 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed mid-fanout: unwind every copy built so far. */
2877 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
2879 list_del(&recv_msg->link);
2880 ipmi_free_recv_msg(recv_msg);
2882 /* We couldn't allocate memory for the
2883 message, so requeue it for handling
2891 copy_event_into_recv_msg(recv_msg, msg);
2892 recv_msg->user = user;
/* Each queued copy holds its own reference on the user. */
2893 kref_get(&user->refcount);
2894 list_add_tail(&(recv_msg->link), &msgs);
2898 if (deliver_count) {
2899 /* Now deliver all the messages. */
2900 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2901 list_del(&recv_msg->link);
2902 deliver_response(recv_msg);
2904 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
2905 /* No one to receive the message, put it in queue if there's
2906 not already too many things in the queue. */
2907 recv_msg = ipmi_alloc_recv_msg();
2909 /* We couldn't allocate memory for the
2910 message, so requeue it for handling
2916 copy_event_into_recv_msg(recv_msg, msg);
2917 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
2919 /* There's too many things in the queue, discard this
2921 printk(KERN_WARNING PFX "Event queue full, discarding an"
2922 " incoming event\n");
2926 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC (system interface). The
 * original request's recv_msg was stashed in msg->user_data; recover
 * it, verify the owning user still exists, then fill in the response
 * fields and deliver it.
 *
 * Fix: corrected the user-visible spelling "vender" -> "vendor" in
 * the no-owner warning message.
 *
 * NOTE(review): listing is elided (returns/braces are missing);
 * comments cover visible lines only.
 */
2931 static int handle_bmc_rsp(ipmi_smi_t intf,
2932 struct ipmi_smi_msg *msg)
2934 struct ipmi_recv_msg *recv_msg;
2935 unsigned long flags;
2936 struct ipmi_user *user;
2938 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2939 if (recv_msg == NULL)
2941 printk(KERN_WARNING"IPMI message received with no owner. This\n"
2942 "could be because of a malformed message, or\n"
2943 "because of a hardware error. Contact your\n"
2944 "hardware vendor for assistance\n");
2948 user = recv_msg->user;
2949 /* Make sure the user still exists. */
2950 if (user && !user->valid) {
2951 /* The user for the message went away, so give up. */
2952 spin_lock_irqsave(&intf->counter_lock, flags);
2953 intf->unhandled_local_responses++;
2954 spin_unlock_irqrestore(&intf->counter_lock, flags);
2955 ipmi_free_recv_msg(recv_msg);
2957 struct ipmi_system_interface_addr *smi_addr;
2959 spin_lock_irqsave(&intf->counter_lock, flags);
2960 intf->handled_local_responses++;
2961 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Response payload starts at rsp[2] (after netfn/lun and cmd). */
2962 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2963 recv_msg->msgid = msg->msgid;
2964 smi_addr = ((struct ipmi_system_interface_addr *)
2966 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2967 smi_addr->channel = IPMI_BMC_CHANNEL;
2968 smi_addr->lun = msg->rsp[0] & 3;
2969 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2970 recv_msg->msg.cmd = msg->rsp[1];
2971 memcpy(recv_msg->msg_data,
2974 recv_msg->msg.data = recv_msg->msg_data;
2975 recv_msg->msg.data_len = msg->rsp_size - 2;
2976 deliver_response(recv_msg);
2982 /* Handle a new message. Return 1 if the message should be requeued,
2983 0 if the message should be freed, or -1 if the message should not
2984 be freed or requeued. */
/*
 * Central dispatch for a message coming up from the SMI layer:
 * sanity-check the response against the request it answers, then
 * route by netfn/cmd — send-message responses, get-message payloads
 * (further routed by channel medium to the IPMB or LAN handlers),
 * event-buffer reads, or plain local-BMC responses.
 *
 * Fix: corrected the warning text "returned to small a message" ->
 * "returned too small a message".
 *
 * NOTE(review): listing is elided (some braces/returns/derefs are
 * missing); comments cover visible lines only.
 */
2985 static int handle_new_recv_msg(ipmi_smi_t intf,
2986 struct ipmi_smi_msg *msg)
2994 for (m = 0; m < msg->rsp_size; m++)
2995 printk(" %2.2x", msg->rsp[m]);
2998 if (msg->rsp_size < 2) {
2999 /* Message is too small to be correct. */
3000 printk(KERN_WARNING PFX "BMC returned too small a message"
3001 " for netfn %x cmd %x, got %d bytes\n",
3002 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3004 /* Generate an error response for the message. */
3005 msg->rsp[0] = msg->data[0] | (1 << 2);
3006 msg->rsp[1] = msg->data[1];
3007 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3009 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3010 || (msg->rsp[1] != msg->data[1])) /* Command */
3012 /* The response is not even marginally correct. */
3013 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3014 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3015 (msg->data[0] >> 2) | 1, msg->data[1],
3016 msg->rsp[0] >> 2, msg->rsp[1]);
3018 /* Generate an error response for the message. */
3019 msg->rsp[0] = msg->data[0] | (1 << 2);
3020 msg->rsp[1] = msg->data[1];
3021 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3025 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3026 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3027 && (msg->user_data != NULL))
3029 /* It's a response to a response we sent. For this we
3030 deliver a send message response to the user. */
3031 struct ipmi_recv_msg *recv_msg = msg->user_data;
3034 if (msg->rsp_size < 2)
3035 /* Message is too small to be correct. */
3038 chan = msg->data[2] & 0x0f;
3039 if (chan >= IPMI_MAX_CHANNELS)
3040 /* Invalid channel number */
3046 /* Make sure the user still exists. */
3047 if (!recv_msg->user || !recv_msg->user->valid)
3050 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3051 recv_msg->msg.data = recv_msg->msg_data;
3052 recv_msg->msg.data_len = 1;
3053 recv_msg->msg_data[0] = msg->rsp[2];
3054 deliver_response(recv_msg);
3055 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3056 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3058 /* It's from the receive queue. */
3059 chan = msg->rsp[3] & 0xf;
3060 if (chan >= IPMI_MAX_CHANNELS) {
3061 /* Invalid channel number */
/* Route by the medium of the channel the message arrived on. */
3066 switch (intf->channels[chan].medium) {
3067 case IPMI_CHANNEL_MEDIUM_IPMB:
3068 if (msg->rsp[4] & 0x04) {
3069 /* It's a response, so find the
3070 requesting message and send it up. */
3071 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3073 /* It's a command to the SMS from some other
3074 entity. Handle that. */
3075 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3079 case IPMI_CHANNEL_MEDIUM_8023LAN:
3080 case IPMI_CHANNEL_MEDIUM_ASYNC:
3081 if (msg->rsp[6] & 0x04) {
3082 /* It's a response, so find the
3083 requesting message and send it up. */
3084 requeue = handle_lan_get_msg_rsp(intf, msg);
3086 /* It's a command to the SMS from some other
3087 entity. Handle that. */
3088 requeue = handle_lan_get_msg_cmd(intf, msg);
3093 /* We don't handle the channel type, so just
3094 * free the message. */
3098 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3099 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3101 /* It's an asyncronous event. */
3102 requeue = handle_read_event_rsp(intf, msg);
3104 /* It's a response from the local BMC. */
3105 requeue = handle_bmc_rsp(intf, msg);
3112 /* Handle a new message from the lower layer. */
/*
 * Entry point the SMI driver calls for every message coming up.
 * Local responses to Send Message commands start/stop the retry
 * timer; everything else goes through handle_new_recv_msg(), with
 * waiting_msgs used as an overflow queue to preserve ordering.
 *
 * NOTE(review): listing is elided (returns/braces are missing);
 * comments cover visible lines only.
 */
3113 void ipmi_smi_msg_received(ipmi_smi_t intf,
3114 struct ipmi_smi_msg *msg)
3116 unsigned long flags;
3120 if ((msg->data_size >= 2)
3121 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3122 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3123 && (msg->user_data == NULL))
3125 /* This is the local response to a command send, start
3126 the timer for these. The user_data will not be
3127 NULL if this is a response send, and we will let
3128 response sends just go through. */
3130 /* Check for errors, if we get certain errors (ones
3131 that mean basically we can try again later), we
3132 ignore them and start the timer. Otherwise we
3133 report the error immediately. */
3134 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3135 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3136 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
3138 int chan = msg->rsp[3] & 0xf;
3140 /* Got an error sending the message, handle it. */
3141 spin_lock_irqsave(&intf->counter_lock, flags);
3142 if (chan >= IPMI_MAX_CHANNELS)
3143 ; /* This shouldn't happen */
3144 else if ((intf->channels[chan].medium
3145 == IPMI_CHANNEL_MEDIUM_8023LAN)
3146 || (intf->channels[chan].medium
3147 == IPMI_CHANNEL_MEDIUM_ASYNC))
3148 intf->sent_lan_command_errs++;
3150 intf->sent_ipmb_command_errs++;
3151 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Hard error: fail the pending sequence entry right away. */
3152 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3154 /* The message was sent, start the timer. */
3155 intf_start_seq_timer(intf, msg->msgid);
3158 ipmi_free_smi_msg(msg);
3162 /* To preserve message order, if the list is not empty, we
3163 tack this message onto the end of the list. */
3164 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3165 if (!list_empty(&intf->waiting_msgs)) {
3166 list_add_tail(&msg->link, &intf->waiting_msgs);
3167 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3170 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3172 rv = handle_new_recv_msg(intf, msg);
3174 /* Could not handle the message now, just add it to a
3175 list to handle later. */
3176 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3177 list_add_tail(&msg->link, &intf->waiting_msgs);
3178 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3179 } else if (rv == 0) {
3180 ipmi_free_smi_msg(msg);
/*
 * Notify every registered user that has a watchdog pretimeout
 * handler. Iterates the user list under RCU.
 */
3187 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3192 list_for_each_entry_rcu(user, &intf->users, link) {
3193 if (!user->handler->ipmi_watchdog_pretimeout)
3196 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * A pending request exhausted its retries: synthesize an error
 * response (netfn converted to the response form, single data byte
 * set to the timeout completion code) and deliver it to the user.
 */
3202 handle_msg_timeout(struct ipmi_recv_msg *msg)
3204 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3205 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
3206 msg->msg.netfn |= 1; /* Convert to a response. */
3207 msg->msg.data_len = 1;
3208 msg->msg.data = msg->msg_data;
3209 deliver_response(msg);
/*
 * Build a fresh SMI message from a recv_msg so a timed-out request
 * can be retransmitted, encoding the sequence number/id into msgid.
 * Returns NULL on allocation failure (visible in the elided lines);
 * callers tolerate that because retries remain.
 */
3212 static struct ipmi_smi_msg *
3213 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3214 unsigned char seq, long seqid)
3216 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3218 /* If we can't allocate the message, then just return, we
3219 get 4 retries, so this should be ok. */
3222 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3223 smi_msg->data_size = recv_msg->msg.data_len;
3224 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
/* Debug dump of the outgoing bytes (guard #ifdef is elided here). */
3230 for (m = 0; m < smi_msg->data_size; m++)
3231 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period. If the entry has
 * expired with no retries left, move its recv_msg onto the caller's
 * timeouts list (the caller delivers the timeout error outside the
 * seq lock). Otherwise rebuild and resend the message, temporarily
 * dropping intf->seq_lock (held by the caller, via *flags) around the
 * actual send.
 *
 * NOTE(review): listing is elided (entry-in-use check, broadcast test
 * condition and some braces are missing); comments cover visible
 * lines only.
 */
3238 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3239 struct list_head *timeouts, long timeout_period,
3240 int slot, unsigned long *flags)
3242 struct ipmi_recv_msg *msg;
3247 ent->timeout -= timeout_period;
3248 if (ent->timeout > 0)
3251 if (ent->retries_left == 0) {
3252 /* The message has used all its retries. */
3254 msg = ent->recv_msg;
3255 list_add_tail(&msg->link, timeouts);
/* Classify the timeout for the statistics counters. */
3256 spin_lock(&intf->counter_lock);
3258 intf->timed_out_ipmb_broadcasts++;
3259 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3260 intf->timed_out_lan_commands++;
3262 intf->timed_out_ipmb_commands++;
3263 spin_unlock(&intf->counter_lock);
3265 struct ipmi_smi_msg *smi_msg;
3266 /* More retries, send again. */
3268 /* Start with the max timer, set to normal
3269 timer after the message is sent. */
3270 ent->timeout = MAX_MSG_TIMEOUT;
3271 ent->retries_left--;
3272 spin_lock(&intf->counter_lock);
3273 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3274 intf->retransmitted_lan_commands++;
3276 intf->retransmitted_ipmb_commands++;
3277 spin_unlock(&intf->counter_lock);
3279 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop the seq lock while calling into the SMI driver's sender. */
3284 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3285 /* Send the new message. We send with a zero
3286 * priority. It timed out, I doubt time is
3287 * that critical now, and high priority
3288 * messages are really only for messages to the
3289 * local MC, which don't get resent. */
3290 intf->handlers->sender(intf->send_info,
3292 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic housekeeping run from the timer: for each registered
 * interface, drain whatever queued messages can now be handled, age
 * every sequence-table slot via check_msg_timeout(), then deliver
 * timeout errors for expired requests. Each interface is pinned with
 * a kref while interfaces_lock is dropped to do the real work.
 *
 * NOTE(review): listing is elided (some braces/continue/break lines
 * are missing); comments cover visible lines only.
 */
3296 static void ipmi_timeout_handler(long timeout_period)
3299 struct list_head timeouts;
3300 struct ipmi_recv_msg *msg, *msg2;
3301 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3302 unsigned long flags;
3305 INIT_LIST_HEAD(&timeouts);
3307 spin_lock(&interfaces_lock);
3308 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3309 intf = ipmi_interfaces[i];
3310 if (IPMI_INVALID_INTERFACE(intf))
3312 kref_get(&intf->refcount);
3313 spin_unlock(&interfaces_lock);
3315 /* See if any waiting messages need to be processed. */
3316 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3317 list_for_each_entry_safe(smi_msg, smi_msg2,
3318 &intf->waiting_msgs, link) {
3319 if (!handle_new_recv_msg(intf, smi_msg)) {
3320 list_del(&smi_msg->link);
3321 ipmi_free_smi_msg(smi_msg);
3323 /* To preserve message order, quit if we
3324 can't handle a message. */
3328 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3330 /* Go through the seq table and find any messages that
3331 have timed out, putting them in the timeouts
3333 spin_lock_irqsave(&intf->seq_lock, flags);
3334 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
3335 check_msg_timeout(intf, &(intf->seq_table[j]),
3336 &timeouts, timeout_period, j,
3338 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Deliver the timeout errors outside of the seq lock. */
3340 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3341 handle_msg_timeout(msg);
3343 kref_put(&intf->refcount, intf_free);
3344 spin_lock(&interfaces_lock);
3346 spin_unlock(&interfaces_lock);
/*
 * Ask every valid interface's SMI driver to poll the BMC for pending
 * events. Called periodically from the timer (see ipmi_timeout).
 */
3349 static void ipmi_request_event(void)
3354 spin_lock(&interfaces_lock);
3355 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3356 intf = ipmi_interfaces[i];
3357 if (IPMI_INVALID_INTERFACE(intf))
3360 intf->handlers->request_events(intf->send_info);
3362 spin_unlock(&interfaces_lock);
/* Periodic driver timer and its tuning constants. */
3365 static struct timer_list ipmi_timer;
3367 /* Call every ~100 ms. */
3368 #define IPMI_TIMEOUT_TIME 100
3370 /* How many jiffies does it take to get to the timeout time. */
3371 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3373 /* Request events from the queue every second (this is the number of
3374 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3375 future, IPMI will add a way to know immediately if an event is in
3376 the queue and this silliness can go away. */
3377 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* stop_operation: set nonzero at module teardown to quiesce the timer. */
3379 static atomic_t stop_operation;
3380 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback: every tick run the timeout handler, roughly once a
 * second also request events, then rearm the timer — unless teardown
 * has set stop_operation.
 */
3382 static void ipmi_timeout(unsigned long data)
3384 if (atomic_read(&stop_operation))
3388 if (ticks_to_req_ev == 0) {
3389 ipmi_request_event();
3390 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3393 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3395 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Leak-detection counters for outstanding message allocations,
   reported at module exit (see cleanup_ipmi). */
3399 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3400 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3402 /* FIXME - convert these to slabs. */
/* Destructor installed as ->done on allocated SMI messages. */
3403 static void free_smi_msg(struct ipmi_smi_msg *msg)
3405 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an SMI message (GFP_ATOMIC, so callable from any context)
 * and wire up its destructor. Returns NULL on allocation failure
 * (NULL check line is elided in this listing).
 */
3409 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3411 struct ipmi_smi_msg *rv;
3412 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3414 rv->done = free_smi_msg;
3415 rv->user_data = NULL;
3416 atomic_inc(&smi_msg_inuse_count);
/* Destructor installed as ->done on allocated receive messages. */
3421 static void free_recv_msg(struct ipmi_recv_msg *msg)
3423 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a receive message (GFP_ATOMIC) and wire up its destructor.
 * Returns NULL on allocation failure (check elided in this listing).
 */
3427 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3429 struct ipmi_recv_msg *rv;
3431 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3433 rv->done = free_recv_msg;
3434 atomic_inc(&recv_msg_inuse_count);
/*
 * Release a receive message; also drops the reference the message
 * held on its owning user (taken when the message was queued).
 */
3439 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3442 kref_put(&msg->user->refcount, free_user);
3446 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op completion handlers for the stack-allocated messages used
   during panic: the messages must not be freed. */
3448 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3452 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3456 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic: if the response is a
 * successful Get Event Receiver reply, stash the event receiver's
 * slave address and LUN on the interface.
 */
3457 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3459 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3460 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3461 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3462 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3464 /* A get event receiver command, save it. */
3465 intf->event_receiver = msg->msg.data[1];
3466 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic: if the response is a
 * successful Get Device ID reply, record whether the local MC is an
 * SEL device and/or an event generator (bits of data[6]).
 */
3470 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3472 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3473 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3474 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3475 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3477 /* A get device id command, save if we are an event
3478 receiver or generator. */
3479 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3480 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * On kernel panic, emit an "OS Critical Stop" platform event on every
 * registered interface, and (with CONFIG_IPMI_PANIC_STRING) also log
 * the panic string into a SEL, chunked into OEM records of 11 bytes.
 * Uses stack-allocated messages with dummy done handlers and forces
 * each interface into run-to-completion (polled) mode, since
 * interrupts/scheduling cannot be relied on during panic.
 *
 * NOTE(review): listing is heavily elided (loops, request arguments
 * and several assignments are missing); comments cover visible lines
 * only.
 */
3485 static void send_panic_events(char *str)
3487 struct kernel_ipmi_msg msg;
3489 unsigned char data[16];
3491 struct ipmi_system_interface_addr *si;
3492 struct ipmi_addr addr;
3493 struct ipmi_smi_msg smi_msg;
3494 struct ipmi_recv_msg recv_msg;
3496 si = (struct ipmi_system_interface_addr *) &addr;
3497 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3498 si->channel = IPMI_BMC_CHANNEL;
3501 /* Fill in an event telling that we have failed. */
3502 msg.netfn = 0x04; /* Sensor or Event. */
3503 msg.cmd = 2; /* Platform event command. */
3506 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3507 data[1] = 0x03; /* This is for IPMI 1.0. */
3508 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3509 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3510 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3512 /* Put a few breadcrumbs in. Hopefully later we can add more things
3513 to make the panic events more useful. */
/* Stack messages must never be freed: install no-op done handlers. */
3520 smi_msg.done = dummy_smi_done_handler;
3521 recv_msg.done = dummy_recv_done_handler;
3523 /* For every registered interface, send the event. */
3524 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3525 intf = ipmi_interfaces[i];
3526 if (IPMI_INVALID_INTERFACE(intf))
3529 /* Send the event announcing the panic. */
3530 intf->handlers->set_run_to_completion(intf->send_info, 1);
3531 i_ipmi_request(NULL,
3540 intf->channels[0].address,
3541 intf->channels[0].lun,
3542 0, 1); /* Don't retry, and don't wait. */
3545 #ifdef CONFIG_IPMI_PANIC_STRING
3546 /* On every interface, dump a bunch of OEM event holding the
3551 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3553 struct ipmi_ipmb_addr *ipmb;
3556 intf = ipmi_interfaces[i];
3557 if (IPMI_INVALID_INTERFACE(intf))
3560 /* First job here is to figure out where to send the
3561 OEM events. There's no way in IPMI to send OEM
3562 events using an event send command, so we have to
3563 find the SEL to put them in and stick them in
3566 /* Get capabilities from the get device id. */
3567 intf->local_sel_device = 0;
3568 intf->local_event_generator = 0;
3569 intf->event_receiver = 0;
3571 /* Request the device info from the local MC. */
3572 msg.netfn = IPMI_NETFN_APP_REQUEST;
3573 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* device_id_fetcher fills in the capability fields synchronously
   because we run in polled (run-to-completion) mode. */
3576 intf->null_user_handler = device_id_fetcher;
3577 i_ipmi_request(NULL,
3586 intf->channels[0].address,
3587 intf->channels[0].lun,
3588 0, 1); /* Don't retry, and don't wait. */
3590 if (intf->local_event_generator) {
3591 /* Request the event receiver from the local MC. */
3592 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3593 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3596 intf->null_user_handler = event_receiver_fetcher;
3597 i_ipmi_request(NULL,
3606 intf->channels[0].address,
3607 intf->channels[0].lun,
3608 0, 1); /* no retry, and no wait. */
3610 intf->null_user_handler = NULL;
3612 /* Validate the event receiver. The low bit must not
3613 be 1 (it must be a valid IPMB address), it cannot
3614 be zero, and it must not be my address. */
3615 if (((intf->event_receiver & 1) == 0)
3616 && (intf->event_receiver != 0)
3617 && (intf->event_receiver != intf->channels[0].address))
3619 /* The event receiver is valid, send an IPMB
3621 ipmb = (struct ipmi_ipmb_addr *) &addr;
3622 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
3623 ipmb->channel = 0; /* FIXME - is this right? */
3624 ipmb->lun = intf->event_receiver_lun;
3625 ipmb->slave_addr = intf->event_receiver;
3626 } else if (intf->local_sel_device) {
3627 /* The event receiver was not valid (or was
3628 me), but I am an SEL device, just dump it
3630 si = (struct ipmi_system_interface_addr *) &addr;
3631 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3632 si->channel = IPMI_BMC_CHANNEL;
3635 continue; /* No where to send the event. */
3638 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
3639 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
/* Walk the panic string 11 bytes at a time, one OEM SEL record per
   chunk, with a running sequence number in data[4]. */
3645 int size = strlen(p);
3651 data[2] = 0xf0; /* OEM event without timestamp. */
3652 data[3] = intf->channels[0].address;
3653 data[4] = j++; /* sequence # */
3654 /* Always give 11 bytes, so strncpy will fill
3655 it with zeroes for me. */
3656 strncpy(data+5, p, 11);
3659 i_ipmi_request(NULL,
3668 intf->channels[0].address,
3669 intf->channels[0].lun,
3670 0, 1); /* no retry, and no wait. */
3673 #endif /* CONFIG_IPMI_PANIC_STRING */
3675 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Guards against sending panic events twice (elided check presumed
   in the missing lines of panic_event — TODO confirm). */
3677 static int has_paniced = 0;
/*
 * Panic-notifier callback: force every interface into polled
 * run-to-completion mode, then (if configured) emit panic events.
 */
3679 static int panic_event(struct notifier_block *this,
3680 unsigned long event,
3690 /* For every registered interface, set it to run to completion. */
3691 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3692 intf = ipmi_interfaces[i];
3693 if (IPMI_INVALID_INTERFACE(intf))
3696 intf->handlers->set_run_to_completion(intf->send_info, 1);
3699 #ifdef CONFIG_IPMI_PANIC_EVENT
3700 send_panic_events(ptr);
/* Registered on panic_notifier_list with high priority so IPMI logging
   runs early in the panic path. */
3706 static struct notifier_block panic_block = {
3707 .notifier_call = panic_event,
3709 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time initialization of the message handler: register the IPMI
 * driver core, create /proc/ipmi, start the periodic timer, and hook
 * the panic notifier. Returns 0 on success, negative errno on
 * failure (guard against double-init is in the elided lines).
 */
3712 static int ipmi_init_msghandler(void)
3720 rv = driver_register(&ipmidriver);
3722 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3726 printk(KERN_INFO "ipmi message handler version "
3727 IPMI_DRIVER_VERSION "\n");
3729 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3730 ipmi_interfaces[i] = NULL;
3732 #ifdef CONFIG_PROC_FS
3733 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3734 if (!proc_ipmi_root) {
3735 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3739 proc_ipmi_root->owner = THIS_MODULE;
3740 #endif /* CONFIG_PROC_FS */
/* Arm the ~100 ms housekeeping timer (see ipmi_timeout). */
3742 init_timer(&ipmi_timer);
3743 ipmi_timer.data = 0;
3744 ipmi_timer.function = ipmi_timeout;
3745 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3746 add_timer(&ipmi_timer);
3748 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: delegate to the shared init routine. */
3755 static __init int ipmi_init_msghandler_mod(void)
3757 ipmi_init_msghandler();
/*
 * Module exit: unhook the panic notifier, stop the timer (sync, to
 * avoid racing a final tick), tear down /proc/ipmi, unregister the
 * driver, and warn about any leaked message allocations.
 */
3761 static __exit void cleanup_ipmi(void)
3768 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
3770 /* This can't be called if any interfaces exist, so no worry about
3771 shutting down the interfaces. */
3773 /* Tell the timer to stop, then wait for it to stop. This avoids
3774 problems with race conditions removing the timer here. */
3775 atomic_inc(&stop_operation);
3776 del_timer_sync(&ipmi_timer);
3778 #ifdef CONFIG_PROC_FS
3779 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3780 #endif /* CONFIG_PROC_FS */
3782 driver_unregister(&ipmidriver);
3786 /* Check for buffer leaks. */
3787 count = atomic_read(&smi_msg_inuse_count);
3789 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
3791 count = atomic_read(&recv_msg_inuse_count);
3793 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module registration metadata and the public API exported to other
   IPMI modules (watchdog, device interface, SMI drivers). */
3796 module_exit(cleanup_ipmi);
3798 module_init(ipmi_init_msghandler_mod);
3799 MODULE_LICENSE("GPL");
3800 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3801 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3802 MODULE_VERSION(IPMI_DRIVER_VERSION);
3804 EXPORT_SYMBOL(ipmi_create_user);
3805 EXPORT_SYMBOL(ipmi_destroy_user);
3806 EXPORT_SYMBOL(ipmi_get_version);
3807 EXPORT_SYMBOL(ipmi_request_settime);
3808 EXPORT_SYMBOL(ipmi_request_supply_msgs);
3809 EXPORT_SYMBOL(ipmi_register_smi);
3810 EXPORT_SYMBOL(ipmi_unregister_smi);
3811 EXPORT_SYMBOL(ipmi_register_for_cmd);
3812 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
3813 EXPORT_SYMBOL(ipmi_smi_msg_received);
3814 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3815 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
3816 EXPORT_SYMBOL(ipmi_addr_length);
3817 EXPORT_SYMBOL(ipmi_validate_addr);
3818 EXPORT_SYMBOL(ipmi_set_gets_events);
3819 EXPORT_SYMBOL(ipmi_smi_watcher_register);
3820 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
3821 EXPORT_SYMBOL(ipmi_set_my_address);
3822 EXPORT_SYMBOL(ipmi_get_my_address);
3823 EXPORT_SYMBOL(ipmi_set_my_LUN);
3824 EXPORT_SYMBOL(ipmi_get_my_LUN);
3825 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3826 EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3827 EXPORT_SYMBOL(ipmi_free_recv_msg);