arch/powerpc/platforms/iseries/viopath.c
/* -*- linux-c -*-
 *
 *  iSeries Virtual I/O Message Path code
 *
 *  Authors: Dave Boutcher <boutcher@us.ibm.com>
 *           Ryan Arnold <ryanarn@us.ibm.com>
 *           Colin Devilbiss <devilbis@us.ibm.com>
 *
 * (C) Copyright 2000-2005 IBM Corporation
 *
 * This code is used by the iSeries virtual disk, cd,
 * tape, and console to communicate with OS/400 in another
 * partition.
 *
 * This program is free software;  you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/prom.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/vio.h>

/* Status of the path to each other partition in the system.
 * This is overkill, since we will only ever establish connections
 * to our hosting partition and the primary partition on the system.
 * But this allows for other support in the future.
 */
static struct viopathStatus {
	int isOpen;		/* Did we open the path?            */
	int isActive;		/* Do we have a mon msg outstanding */
	int users[VIO_MAX_SUBTYPES];
	HvLpInstanceId mSourceInst;
	HvLpInstanceId mTargetInst;
	int numberAllocated;
} viopathStatus[HVMAXARCHITECTEDLPS];

static DEFINE_SPINLOCK(statuslock);

/*
 * For each kind of event we allocate a buffer that is
 * guaranteed not to cross a page boundary
 */
static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
	__attribute__((__aligned__(4096)));
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;

static void handleMonitorEvent(struct HvLpEvent *event);

/*
 * We use this structure to handle asynchronous responses.  The caller
 * blocks on the semaphore and the handler posts the semaphore.  However,
 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
 */
struct alloc_parms {
	struct semaphore sem;
	int number;
	atomic_t wait_atomic;
	int used_wait_atomic;
};

/* Put a sequence number in each mon msg.  The value is not
 * important.  Start at something other than 0 just for
 * readability.  Wrapping is OK.
 */
static u8 viomonseq = 22;

/* Our hosting logical partition.  We get this at startup
 * time, and different modules access this variable directly.
 */
HvLpIndex viopath_hostLp = HvLpIndexInvalid;
EXPORT_SYMBOL(viopath_hostLp);
HvLpIndex viopath_ourLp = HvLpIndexInvalid;
EXPORT_SYMBOL(viopath_ourLp);

/* For each kind of incoming event we set a pointer to a
 * routine to call.
 */
static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];

#define VIOPATH_KERN_WARN	KERN_WARNING "viopath: "
#define VIOPATH_KERN_INFO	KERN_INFO "viopath: "

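/*
 * Show routine for /proc/iSeries/config.  Sends a config request to
 * the hosting partition, waits for the DMA'd response, then prints
 * the returned text plus the virtual LAN map and system serial number.
 */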
static int proc_viopath_show(struct seq_file *m, void *v)
{
	char *buf;
	u16 vlanMap;
	dma_addr_t handle;
	HvLpEvent_Rc hvrc;
	DECLARE_MUTEX_LOCKED(Semaphore);
	struct device_node *node;
	const char *sysid;

	buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
				DMA_FROM_DEVICE);

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_config | vioconfigget,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
			((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);

	if (hvrc != HvLpEvent_Rc_Good)
		printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);

	down(&Semaphore);

	vlanMap = HvLpConfig_getVirtualLanIndexMap();

	buf[HW_PAGE_SIZE-1] = '\0';
	seq_printf(m, "%s", buf);

	dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
			 DMA_FROM_DEVICE);
	kfree(buf);

	seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);

	node = of_find_node_by_path("/");
	sysid = NULL;
	if (node != NULL)
		sysid = get_property(node, "system-id", NULL);

	if (sysid == NULL)
		seq_printf(m, "SRLNBR=<UNKNOWN>\n");
	else
		/* Skip "IBM," on front of serial number, see dt.c */
		seq_printf(m, "SRLNBR=%s\n", sysid + 4);

	of_node_put(node);

	return 0;
}

static int proc_viopath_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_viopath_show, NULL);
}

static struct file_operations proc_viopath_operations = {
	.open		= proc_viopath_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init vio_proc_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/config", 0, NULL);
	if (e)
		e->proc_fops = &proc_viopath_operations;

	return 0;
}
__initcall(vio_proc_init);

/* See if a given LP is active.  Allow for invalid LPs to be passed in
 * and just return not active (0).
 */
int viopath_isactive(HvLpIndex lp)
{
	if (lp == HvLpIndexInvalid)
		return 0;
	if (lp < HVMAXARCHITECTEDLPS)
		return viopathStatus[lp].isActive;
	else
		return 0;
}
EXPORT_SYMBOL(viopath_isactive);

/*
 * We cache the source and target instance ids for each
 * partition.
 */
HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
{
	return viopathStatus[lp].mSourceInst;
}
EXPORT_SYMBOL(viopath_sourceinst);

HvLpInstanceId viopath_targetinst(HvLpIndex lp)
{
	return viopathStatus[lp].mTargetInst;
}
EXPORT_SYMBOL(viopath_targetinst);

/*
 * Send a monitor message.  This is a message with the acknowledge
 * bit on that the other side will NOT explicitly acknowledge.  When
 * the other side goes down, the hypervisor will acknowledge any
 * outstanding messages....so we will know when the other side dies.
 */
static void sendMonMsg(HvLpIndex remoteLp)
{
	HvLpEvent_Rc hvrc;

	viopathStatus[remoteLp].mSourceInst =
		HvCallEvent_getSourceLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);
	viopathStatus[remoteLp].mTargetInst =
		HvCallEvent_getTargetLpInstanceId(remoteLp,
				HvLpEvent_Type_VirtualIo);

	/*
	 * Deliberately ignore the return code here.  If we call this
	 * more than once, we don't care.
	 */
	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);

	hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
			viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
			HvLpEvent_AckType_DeferredAck,
			viopathStatus[remoteLp].mSourceInst,
			viopathStatus[remoteLp].mTargetInst,
			viomonseq++, 0, 0, 0, 0, 0);

	if (hvrc == HvLpEvent_Rc_Good)
		viopathStatus[remoteLp].isActive = 1;
	else {
		printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
				remoteLp);
		viopathStatus[remoteLp].isActive = 0;
	}
}

static void handleMonitorEvent(struct HvLpEvent *event)
{
	HvLpIndex remoteLp;
	int i;

	/*
	 * This handler is _also_ called as part of the loop
	 * at the end of this routine, so it must be able to
	 * ignore NULL events...
	 */
	if (!event)
		return;

	/*
	 * First see if this is just a normal monitor message from the
	 * other partition
	 */
	if (hvlpevent_is_int(event)) {
		remoteLp = event->xSourceLp;
		if (!viopathStatus[remoteLp].isActive)
			sendMonMsg(remoteLp);
		return;
	}

	/*
	 * This path is for an acknowledgement; the other partition
	 * died
	 */
	remoteLp = event->xTargetLp;
	if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
	    (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
		printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
		return;
	}

	printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);

	viopathStatus[remoteLp].isActive = 0;

	/*
	 * For each active handler, pass them a NULL
	 * message to indicate that the other partition
	 * died
	 */
	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
		if (vio_handler[i] != NULL)
			(*vio_handler[i])(NULL);
	}
}

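/*
 * Register a handler for one major VIO subtype.  Returns -EINVAL for
 * an out-of-range subtype and -EBUSY if a handler is already
 * registered for it.
 */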
int vio_setHandler(int subtype, vio_event_handler_t *beh)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] != NULL)
		return -EBUSY;
	vio_handler[subtype] = beh;
	return 0;
}
EXPORT_SYMBOL(vio_setHandler);

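/*
 * Unregister the handler for one major VIO subtype.  Returns -EAGAIN
 * if no handler was registered for that subtype.
 */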
int vio_clearHandler(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;
	if (vio_handler[subtype] == NULL)
		return -EAGAIN;
	vio_handler[subtype] = NULL;
	return 0;
}
EXPORT_SYMBOL(vio_clearHandler);

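/*
 * Handler for config subtype events.  Interrupt-side requests are
 * unexpected and are acked back with HvLpEvent_Rc_InvalidSubtype; an
 * acknowledgement wakes the waiter whose semaphore was stashed in
 * xCorrelationToken by proc_viopath_show().
 */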
static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (hvlpevent_is_int(event)) {
		printk(VIOPATH_KERN_WARN
		       "unexpected config request from partition %d\n",
		       event->xSourceLp);

		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	up((struct semaphore *)event->xCorrelationToken);
}

/*
 * Initialization of the hosting partition
 */
void vio_set_hostlp(void)
{
	/*
	 * If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/*
	 * Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);

	if (viopath_hostLp != HvLpIndexInvalid)
		vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);

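/*
 * Main demultiplexer for virtual I/O events, registered with
 * HvLpEvent_registerHandler().  Validates the source and target
 * instance ids against the cached path state, then dispatches the
 * event to the handler registered for its major subtype.
 */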
static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
		>> VIOMAJOR_SUBTYPE_SHIFT;

	if (hvlpevent_is_int(event)) {
		remoteLp = event->xSourceLp;
		/*
		 * The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it
		 * would have different source and target instances, in which
		 * case we'd want to reset them.  This case really protects
		 * against an unauthorized active partition sending interrupts
		 * or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(VIOPATH_KERN_WARN
			       "message from invalid partition. "
			       "ack msg rcvd, target inst (%d) doesn't match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(VIOPATH_KERN_WARN
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary */
		if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens */
	(*vio_handler[subtype])(event);
}

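/*
 * Completion callback passed to mf_allocate_lp_events() and
 * mf_deallocate_lp_events().  Records the number of events actually
 * allocated, then wakes the waiter: via the atomic flag when the
 * caller could not sleep, otherwise via the semaphore.
 */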
static void viopath_donealloc(void *parm, int number)
{
	struct alloc_parms *parmsp = parm;

	parmsp->number = number;
	if (parmsp->used_wait_atomic)
		atomic_set(&parmsp->wait_atomic, 0);
	else
		up(&parmsp->sem);
}

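/*
 * Synchronously allocate numEvents LP events for the path to remoteLp.
 * During early boot (system_state != SYSTEM_RUNNING) we cannot sleep,
 * so we spin on an atomic flag instead of blocking on the semaphore.
 * Returns the number of events actually allocated.
 */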
static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct alloc_parms parms;

	if (system_state != SYSTEM_RUNNING) {
		parms.used_wait_atomic = 1;
		atomic_set(&parms.wait_atomic, 1);
	} else {
		parms.used_wait_atomic = 0;
		init_MUTEX_LOCKED(&parms.sem);
	}
	mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
			250,	/* It would be nice to put a real number here! */
			numEvents, &viopath_donealloc, &parms);
	if (system_state != SYSTEM_RUNNING) {
		while (atomic_read(&parms.wait_atomic))
			mb();
	} else
		down(&parms.sem);
	return parms.number;
}

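/*
 * Open the VIO path to remoteLp for one user of the given subtype and
 * reserve numReq LP events for it.  The first open for a partition
 * also opens the HV event path, registers vio_handleEvent() and sends
 * the initial monitor message.  A typical caller (sketch only; the
 * subtype and count are illustrative) does:
 *
 *	viopath_open(viopath_hostLp, viomajorsubtype_config, 2);
 *	vio_setHandler(viomajorsubtype_config, my_handler);
 *	...
 *	vio_clearHandler(viomajorsubtype_config);
 *	viopath_close(viopath_hostLp, viomajorsubtype_config, 2);
 */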
int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	int tempNumAllocated;

	if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	if (!event_buffer_initialised) {
		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 1);
		event_buffer_initialised = 1;
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;
		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);

		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		spin_unlock_irqrestore(&statuslock, flags);
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
					HvLpEvent_Type_VirtualIo);

			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
			HvCallEvent_getSourceLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
			HvCallEvent_getTargetLpInstanceId(remoteLp,
					HvLpEvent_Type_VirtualIo);
		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					  &vio_handleEvent);
		sendMonMsg(remoteLp);
		printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
				"setting sinst %d, tinst %d\n",
				remoteLp, viopathStatus[remoteLp].mSourceInst,
				viopathStatus[remoteLp].mTargetInst);
	}

	spin_unlock_irqrestore(&statuslock, flags);
	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}
EXPORT_SYMBOL(viopath_open);

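/*
 * Drop one user of the given subtype on the path to remoteLp and give
 * back numReq LP events.  When the last user of any subtype is gone,
 * the HV event path is closed and the event buffers are reset.
 */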
int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct alloc_parms parms;

	if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);
	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0)
		viopathStatus[remoteLp].users[subtype]--;

	spin_unlock_irqrestore(&statuslock, flags);

	parms.used_wait_atomic = 0;
	init_MUTEX_LOCKED(&parms.sem);
	mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &parms);
	down(&parms.sem);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
		numOpen += viopathStatus[remoteLp].users[i];

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
				remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
					     HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
			atomic_set(&event_buffer_available[i], 0);
		event_buffer_initialised = 0;
	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}
EXPORT_SYMBOL(viopath_close);

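/*
 * Hand out the fixed 256-byte event buffer for a subtype.  The atomic
 * flag enforces single ownership; NULL is returned if the buffer is
 * already in use or the subtype is out of range.
 */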
void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;

	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return &event_buffer[subtype * 256];
	else
		return NULL;
}
EXPORT_SYMBOL(vio_get_event_buffer);

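/*
 * Return a buffer obtained from vio_get_event_buffer().  Complains
 * (and does nothing) if the buffer was not allocated or is not the
 * buffer belonging to this subtype.
 */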
void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(VIOPATH_KERN_WARN
		       "unexpected subtype %d freeing event buffer\n", subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(VIOPATH_KERN_WARN
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != &event_buffer[subtype * 256]) {
		printk(VIOPATH_KERN_WARN
		       "freeing invalid event buffer, subtype %d\n", subtype);
		return;
	}

	atomic_set(&event_buffer_available[subtype], 1);
}
EXPORT_SYMBOL(vio_free_event_buffer);

static const struct vio_error_entry vio_no_error =
    { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
    { 0, EIO, "Unknown Error" };

static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};

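/*
 * Translate a VIO return code into an errno value and a description,
 * consulting the caller's local_table (if any) before the defaults.
 * rc 0 maps to vio_no_error; anything unrecognized maps to
 * vio_unknown_error.
 */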
const struct vio_error_entry *vio_lookup_rc(
		const struct vio_error_entry *local_table, u16 rc)
{
	const struct vio_error_entry *cur;

	if (!rc)
		return &vio_no_error;
	if (local_table)
		for (cur = local_table; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = vio_default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &vio_unknown_error;
}
EXPORT_SYMBOL(vio_lookup_rc);