[PATCH] ppc64: Don't count number of events processed for caller
[pandora-kernel.git] arch/ppc64/kernel/ItLpQueue.c
/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition.  This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

static char *event_types[9] = {
        "Hypervisor\t\t",
        "Machine Facilities\t",
        "Session Manager\t",
        "SPD I/O\t\t",
        "Virtual Bus\t\t",
        "PCI I/O\t\t",
        "RIO I/O\t\t",
        "Virtual Lan\t\t",
        "Virtual I/O\t\t"
};

static __inline__ int set_inUse(void)
{
        int t;
        u32 *inUseP = &hvlpevent_queue.xInUseWord;

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%2         \n\
        cmpwi   0,%0,0          \n\
        li      %0,0            \n\
        bne-    2f              \n\
        addi    %0,%0,1         \n\
        stwcx.  %0,0,%2         \n\
        bne-    1b              \n\
2:      eieio"
        : "=&r" (t), "=m" (hvlpevent_queue.xInUseWord)
        : "r" (inUseP), "m" (hvlpevent_queue.xInUseWord)
        : "cc");

        return t;
}
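
/*
 * For reference, set_inUse() implements an atomic test-and-set on
 * xInUseWord using the lwarx/stwcx. reservation pair.  A plain-C
 * sketch of the same logic (illustrative only; without the
 * reservation instructions it would be racy):
 *
 *      if (hvlpevent_queue.xInUseWord != 0)
 *              return 0;       -- someone is already processing the queue
 *      hvlpevent_queue.xInUseWord = 1;
 *      return 1;               -- we now own the queue
 */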

static __inline__ void clear_inUse(void)
{
        hvlpevent_queue.xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;

static struct HvLpEvent *get_next_hvlpevent(void)
{
        struct HvLpEvent *nextLpEvent =
                (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

        if (nextLpEvent->xFlags.xValid) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
                hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                                LpEventAlign) / LpEventAlign) * LpEventAlign;
                /* Wrap to beginning if no room at end */
                if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
                        hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
        } else
                nextLpEvent = NULL;

        return nextLpEvent;
}
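
/*
 * Worked example of the advance calculation above, using the 64-byte
 * LpEventAlign noted in hvlpevent_clear_valid() below: an event with
 * xSizeMinus1 = 100 gives (100 + 64) / 64 = 2 aligned blocks, so the
 * current-event pointer advances by 2 * 64 = 128 bytes.
 */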

static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
        return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
        /* Clear the valid bit of the event
         * Also clear bits within this event that might
         * look like valid bits (on 64-byte boundaries)
         */
        unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
                          LpEventAlign) - 1;

        switch (extra) {
        case 3:
                ((struct HvLpEvent *)((char *)event + 3 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 2:
                ((struct HvLpEvent *)((char *)event + 2 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 1:
                ((struct HvLpEvent *)((char *)event + 1 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 0:
                break;
        }

        mb();
        event->xFlags.xValid = 0;
}
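
/*
 * Worked example for hvlpevent_clear_valid(): an event with
 * xSizeMinus1 = 191 occupies (191 + 64) / 64 = 3 aligned 64-byte
 * blocks, so extra = 2 and the stale valid flags at offsets 128 and
 * 64 are cleared (in that order) before the event's own valid bit.
 */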

void process_hvlpevents(struct pt_regs *regs)
{
        unsigned numIntsProcessed = 0;
        struct HvLpEvent *nextLpEvent;

        /* If we have recursed, just return */
        if (!set_inUse())
                return;

        if (ItLpQueueInProcess == 0)
                ItLpQueueInProcess = 1;
        else
                BUG();

        for (;;) {
                nextLpEvent = get_next_hvlpevent();
                if (nextLpEvent) {
                        ++numIntsProcessed;
                        hvlpevent_queue.xLpIntCount++;
                        /* Call the appropriate handler here, passing
                         * a pointer to the LpEvent.  The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half (perhaps for
                         * an ACK).
                         *
                         * Handlers are responsible for ACK processing.
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so no type check is necessary
                         * here!
                         */
                        if (nextLpEvent->xType < HvLpEvent_Type_NumTypes)
                                hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
                        if (nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                            lpEventHandler[nextLpEvent->xType])
                                lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
                        else
                                printk(KERN_INFO "Unexpected Lp Event type=%d\n",
                                       nextLpEvent->xType);

                        hvlpevent_clear_valid(nextLpEvent);
                } else if (hvlpevent_queue.xPlicOverflowIntPending)
                        /*
                         * No more valid events. If overflow events are
                         * pending, process them.
                         */
                        HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
                else
                        break;
        }

        ItLpQueueInProcess = 0;
        mb();
        clear_inUse();

        get_paca()->lpevent_count += numIntsProcessed;
}
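
/*
 * As the patch title notes, process_hvlpevents() no longer reports the
 * number of events handled back to its caller; the count is instead
 * accumulated per-CPU in the paca (lpevent_count) and exposed through
 * /proc/iSeries/lpevents below.
 */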

static int set_spread_lpevents(char *str)
{
        unsigned long val = simple_strtoul(str, NULL, 0);

        /*
         * The parameter is the number of processors to share in processing
         * lp events.
         */
        if ((val > 0) && (val <= NR_CPUS)) {
                spread_lpevents = val;
                printk("lpevent processing spread over %ld processors\n", val);
        } else {
                printk("invalid spread_lpevents %ld\n", val);
        }

        return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
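
/*
 * Usage example: booting with "spread_lpevents=2" on the kernel
 * command line makes hvlpevent_is_pending() report pending work only
 * on CPUs 0 and 1, so just those two processors take part in lp event
 * processing.
 */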

void setup_hvlpevent_queue(void)
{
        void *eventStack;

        /*
         * Allocate a page for the Event Stack. The Hypervisor needs the
         * absolute real address, so we subtract out the KERNELBASE and add
         * in the absolute real address of the kernel load area.
         */
        eventStack = alloc_bootmem_pages(LpEventStackSize);
        memset(eventStack, 0, LpEventStackSize);

        /* Invoke the hypervisor to initialize the event stack */
        HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

        hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
        hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
        hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
                                        (LpEventStackSize - LpEventMaxSize);
        hvlpevent_queue.xIndex = 0;
}
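
/*
 * Layout sketch, assuming the historical header values
 * LpEventStackSize = 4096 and LpEventMaxSize = 256: the event stack is
 * one page, and xSlicLastValidEventPtr = base + 3840 marks the last
 * slot at which a maximum-size event still fits inside the page.
 */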

static int proc_lpevents_show(struct seq_file *m, void *v)
{
        unsigned int i;

        seq_printf(m, "LpEventQueue 0\n");
        seq_printf(m, "  events processed:\t%lu\n",
                   (unsigned long)hvlpevent_queue.xLpIntCount);

        for (i = 0; i < 9; ++i)
                seq_printf(m, "    %s %10lu\n", event_types[i],
                           (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);

        seq_printf(m, "\n  events processed by processor:\n");

        for_each_online_cpu(i)
                seq_printf(m, "    CPU%02d  %10u\n", i, paca[i].lpevent_count);

        return 0;
}
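
/*
 * Example /proc/iSeries/lpevents output produced by the code above
 * (the counts and alignment shown are illustrative):
 *
 *      LpEventQueue 0
 *        events processed:     12345
 *          Hypervisor                  17
 *          ...
 *          Virtual I/O               4242
 *
 *        events processed by processor:
 *          CPU00        6172
 *          CPU01        6173
 */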

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
        .open           = proc_lpevents_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_lpevents_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_lpevents_operations;

        return 0;
}
__initcall(proc_lpevents_init);