[PATCH] ppc64: Simplify counting of lpevents, remove lpevent_count from paca
arch/ppc64/kernel/ItLpQueue.c
/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan  IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition.  This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

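/*
 * Per-cpu counts of events handled, indexed by event type; they are
 * summed below for /proc/iSeries/lpevents.
 */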
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
        "Hypervisor\t\t",
        "Machine Facilities\t",
        "Session Manager\t",
        "SPD I/O\t\t",
        "Virtual Bus\t\t",
        "PCI I/O\t\t",
        "RIO I/O\t\t",
        "Virtual Lan\t\t",
        "Virtual I/O\t\t"
};

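/*
 * Atomically claim the queue's in-use word using a lwarx/stwcx. loop.
 * Returns 1 if this caller took ownership, or 0 if the word was
 * already set, i.e. event processing is already in progress.
 */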
static __inline__ int set_inUse(void)
{
        int t;
        u32 *inUseP = &hvlpevent_queue.xInUseWord;

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%2         \n\
        cmpwi   0,%0,0          \n\
        li      %0,0            \n\
        bne-    2f              \n\
        addi    %0,%0,1         \n\
        stwcx.  %0,0,%2         \n\
        bne-    1b              \n\
2:      eieio"
        : "=&r" (t), "=m" (hvlpevent_queue.xInUseWord)
        : "r" (inUseP), "m" (hvlpevent_queue.xInUseWord)
        : "cc");

        return t;
}

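/*
 * Release the queue.  Callers must order their prior stores themselves;
 * process_hvlpevents() issues an mb() before calling this.
 */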
static __inline__ void clear_inUse(void)
{
        hvlpevent_queue.xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;

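/*
 * Return the next valid event on the queue, advancing xSlicCurEventPtr
 * past it (wrapping at the end of the event stack), or NULL if no
 * valid event is pending.
 */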
static struct HvLpEvent *get_next_hvlpevent(void)
{
        struct HvLpEvent *nextLpEvent =
                (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

        if (nextLpEvent->xFlags.xValid) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event, rounding the
                 * event's size up to the next LpEventAlign boundary */
                hvlpevent_queue.xSlicCurEventPtr +=
                        ((nextLpEvent->xSizeMinus1 + LpEventAlign) /
                         LpEventAlign) * LpEventAlign;
                /* Wrap to beginning if no room at end */
                if (hvlpevent_queue.xSlicCurEventPtr >
                    hvlpevent_queue.xSlicLastValidEventPtr)
                        hvlpevent_queue.xSlicCurEventPtr =
                                hvlpevent_queue.xSlicEventStackPtr;
        } else
                nextLpEvent = NULL;

        return nextLpEvent;
}

static unsigned long spread_lpevents = NR_CPUS;

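/*
 * Report whether an event (or an overflow of events) is waiting.
 * Processors at or above spread_lpevents never report pending events.
 */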
int hvlpevent_is_pending(void)
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
        return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
        /* Clear the valid bit of the event.  Also clear any bits within
         * this event that might look like valid bits (they recur on
         * 64-byte LpEventAlign boundaries); those are cleared first,
         * with a barrier before the real valid bit is cleared.
         */
        unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
                          LpEventAlign) - 1;

        switch (extra) {
        case 3:
                ((struct HvLpEvent *)((char *)event + 3 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 2:
                ((struct HvLpEvent *)((char *)event + 2 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 1:
                ((struct HvLpEvent *)((char *)event + 1 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 0:
                break;
        }

        mb();

        event->xFlags.xValid = 0;
}

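/*
 * Dispatch queued events to their registered handlers until the queue
 * is empty, then pull in any pending overflow events.  Recursion is
 * caught via set_inUse()/ItLpQueueInProcess.
 */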
void process_hvlpevents(struct pt_regs *regs)
{
        struct HvLpEvent *nextLpEvent;

        /* If we have recursed, just return */
        if (!set_inUse())
                return;

        if (ItLpQueueInProcess == 0)
                ItLpQueueInProcess = 1;
        else
                BUG();

        for (;;) {
                nextLpEvent = get_next_hvlpevent();
                if (nextLpEvent) {
                        /* Call the appropriate handler here, passing
                         * a pointer to the LpEvent.  The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half (perhaps for
                         * an ACK).
                         *
                         * Handlers are responsible for ACK processing.
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so in principle no type check
                         * is necessary here; we still check defensively
                         * before counting and dispatching.
                         */
                        if (nextLpEvent->xType < HvLpEvent_Type_NumTypes)
                                __get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
                        if (nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                            lpEventHandler[nextLpEvent->xType])
                                lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
                        else
                                printk(KERN_INFO "Unexpected Lp Event type=%d\n",
                                       nextLpEvent->xType);

                        hvlpevent_clear_valid(nextLpEvent);
                } else if (hvlpevent_queue.xPlicOverflowIntPending)
                        /*
                         * No more valid events.  If overflow events are
                         * pending, process them.
                         */
                        HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
                else
                        break;
        }

        ItLpQueueInProcess = 0;
        mb();
        clear_inUse();
}

static int set_spread_lpevents(char *str)
{
        unsigned long val = simple_strtoul(str, NULL, 0);

        /*
         * The parameter is the number of processors to share in processing
         * lp events.
         */
        if ((val > 0) && (val <= NR_CPUS)) {
                spread_lpevents = val;
                printk(KERN_INFO "lpevent processing spread over %ld processors\n",
                       val);
        } else {
                printk(KERN_WARNING "invalid spread_lpevents %ld\n", val);
        }

        return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
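/*
 * For example, booting with "spread_lpevents=2" limits lpevent
 * processing to CPUs 0 and 1; see hvlpevent_is_pending() above.
 */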

void setup_hvlpevent_queue(void)
{
        void *eventStack;

        /*
         * Allocate a page for the Event Stack. The Hypervisor needs the
         * absolute real address, so we subtract out the KERNELBASE and add
         * in the absolute real address of the kernel load area.
         */
        eventStack = alloc_bootmem_pages(LpEventStackSize);
        memset(eventStack, 0, LpEventStackSize);

        /* Invoke the hypervisor to initialize the event stack */
        HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

        hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
        hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
        hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
                                        (LpEventStackSize - LpEventMaxSize);
        hvlpevent_queue.xIndex = 0;
}

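/*
 * Emit the event counts to /proc/iSeries/lpevents: a grand total,
 * per-type totals, and per-processor totals.
 */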
static int proc_lpevents_show(struct seq_file *m, void *v)
{
        int cpu, i;
        unsigned long sum;
        static unsigned long cpu_totals[NR_CPUS];

        /* FIXME: do we care that there's no locking here? */
        sum = 0;
        for_each_online_cpu(cpu) {
                cpu_totals[cpu] = 0;
                for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
                        cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
                }
                sum += cpu_totals[cpu];
        }

        seq_printf(m, "LpEventQueue 0\n");
        seq_printf(m, "  events processed:\t%lu\n", sum);

        for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
                sum = 0;
                for_each_online_cpu(cpu) {
                        sum += per_cpu(hvlpevent_counts, cpu)[i];
                }

                seq_printf(m, "    %s %10lu\n", event_types[i], sum);
        }

        seq_printf(m, "\n  events processed by processor:\n");

        for_each_online_cpu(cpu) {
                seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
        }

        return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
        .open           = proc_lpevents_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init proc_lpevents_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_lpevents_operations;

        return 0;
}
__initcall(proc_lpevents_init);