/*
 * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#define SBPROF_TB_DEBUG 0

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/trace_prof.h>

#define DEVNAME "bcm1250_tbprof"

static struct sbprof_tb sbp;

#define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)

/************************************************************************
 * Support for ZBbus sampling using the trace buffer
 *
 * We use the SCD performance counter interrupt, caused by a Zclk counter
 * overflow, to trigger the start of tracing.
 *
 * We set the trace buffer to sample everything and freeze on
 * overflow.
 *
 * We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
 *
 ************************************************************************/
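/*
 * Rough userspace usage sketch (illustrative only; the device node path
 * is an assumption, not something this driver creates):
 *
 *	int fd = open("/dev/bcm1250_tbprof", O_RDONLY); // char dev SBPROF_TB_MAJOR, minor 0
 *	ioctl(fd, SBPROF_ZBSTART);			// arm tracing
 *	int full;
 *	ioctl(fd, SBPROF_ZBWAITFULL, &full);		// sleep until the buffer fills
 *	char sample[TB_SAMPLE_SIZE];
 *	while (read(fd, sample, sizeof(sample)) > 0)
 *		;					// drain collected samples
 *	ioctl(fd, SBPROF_ZBSTOP);			// disarm and release IRQs
 *	close(fd);
 */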

static u_int64_t tb_period;

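/*
 * Arm one sampling interval: program SCD performance counter 1 to raise
 * the PERF_CNT interrupt after tb_period Zclks, reset the trace buffer,
 * and configure it to freeze when full.  The freeze raises
 * K_INT_TRACE_FREEZE, which is serviced by sbprof_tb_intr() below.
 */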
static void arm_tb(void)
{
	u_int64_t scdperfcnt;
	u_int64_t next = (1ULL << 40) - tb_period;
	u_int64_t tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
	/* Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
	   trigger start of trace.  XXX vary sampling period */
	bus_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
	scdperfcnt = bus_readq(IOADDR(A_SCD_PERF_CNT_CFG));
	/* Unfortunately, in Pass 2 we must clear all counters to knock down
	   a previous interrupt request.  This means that bus profiling
	   requires ALL of the SCD perf counters. */
	bus_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |	// keep counters 0,2,3 as is
		   M_SPC_CFG_ENABLE |			// enable counting
		   M_SPC_CFG_CLEAR |			// clear all counters
		   V_SPC_CFG_SRC1(1),			// counter 1 counts cycles
		   IOADDR(A_SCD_PERF_CNT_CFG));
	bus_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
	/* Reset the trace buffer */
	bus_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
#if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
	/* XXXKW may want to expose control to the data-collector */
	tb_options |= M_SCD_TRACE_CFG_FORCECNT;
#endif
	bus_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
	sbp.tb_armed = 1;
}

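/*
 * Trace-buffer freeze interrupt.  When the trace buffer fills, read the
 * 256 frozen trace bundles (6 doublewords each) into the next sample
 * slot, then either re-arm for another sample or shut the engine down
 * and wake any waiters.
 */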
static irqreturn_t sbprof_tb_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	int i;
	DBG(printk(DEVNAME ": tb_intr\n"));
	if (sbp.next_tb_sample < MAX_TB_SAMPLES) {
		/* XXX should use XKPHYS to make writes bypass L2 */
		u_int64_t *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
		/* Read out trace */
		bus_writeq(M_SCD_TRACE_CFG_START_READ, IOADDR(A_SCD_TRACE_CFG));
		__asm__ __volatile__ ("sync" : : : "memory");
		/* Loop runs backwards because bundles are read out in reverse order */
		for (i = 256 * 6; i > 0; i -= 6) {
			// Subscripts decrease to put bundle in the order
			//   t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi
			p[i-1] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t2 hi
			p[i-2] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t2 lo
			p[i-3] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t1 hi
			p[i-4] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t1 lo
			p[i-5] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t0 hi
			p[i-6] = bus_readq(IOADDR(A_SCD_TRACE_READ));	// read t0 lo
		}
		if (!sbp.tb_enable) {
			DBG(printk(DEVNAME ": tb_intr shutdown\n"));
			bus_writeq(M_SCD_TRACE_CFG_RESET,
				   IOADDR(A_SCD_TRACE_CFG));
			sbp.tb_armed = 0;
			wake_up(&sbp.tb_sync);
		} else {
			arm_tb();	// knock down current interrupt and get another one later
		}
	} else {
		/* No more trace buffer samples */
		DBG(printk(DEVNAME ": tb_intr full\n"));
		bus_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
		sbp.tb_armed = 0;
		if (!sbp.tb_enable) {
			wake_up(&sbp.tb_sync);
		}
		wake_up(&sbp.tb_read);
	}
	return IRQ_HANDLED;
}

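/*
 * SCD performance counter interrupt.  The IRQ is requested only so that
 * no other driver can claim it while profiling is active; the counter
 * overflow is routed into the trace engine (see sbprof_zbprof_start()),
 * so this handler is not expected to run.
 */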
static irqreturn_t sbprof_pc_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	printk(DEVNAME ": unexpected pc_intr\n");
	return IRQ_NONE;
}

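/*
 * Start ZBbus profiling: claim the trace-freeze and perf-counter IRQs,
 * quiesce the address traps, program trace event 0 to trigger on the
 * PERF_CNT interrupt, and arm the first sampling interval.  Returns
 * -EBUSY if profiling is already enabled or an IRQ cannot be claimed.
 */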
int sbprof_zbprof_start(struct file *filp)
{
	u_int64_t scdperfcnt;

	if (sbp.tb_enable)
		return -EBUSY;

	DBG(printk(DEVNAME ": starting\n"));

	sbp.tb_enable = 1;
	sbp.next_tb_sample = 0;
	filp->f_pos = 0;

	if (request_irq
	    (K_INT_TRACE_FREEZE, sbprof_tb_intr, 0, DEVNAME " trace freeze", &sbp)) {
		return -EBUSY;
	}
	/* Make sure there isn't a perf-cnt interrupt waiting */
	scdperfcnt = bus_readq(IOADDR(A_SCD_PERF_CNT_CFG));
	/* Disable and clear counters, override SRC_1 */
	bus_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
		   M_SPC_CFG_ENABLE |
		   M_SPC_CFG_CLEAR |
		   V_SPC_CFG_SRC1(1),
		   IOADDR(A_SCD_PERF_CNT_CFG));

	/* We grab this interrupt to prevent others from trying to use
	   it, even though we don't want to service the interrupts
	   (they only feed into the trace-on-interrupt mechanism) */
	if (request_irq
	    (K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
		free_irq(K_INT_TRACE_FREEZE, &sbp);
		return -EBUSY;
	}

	/* I need the core to mask these, but the interrupt mapper to
	   pass them through.  I am exploiting my knowledge that
	   cp0_status masks out IP[5]. krw */
	bus_writeq(K_INT_MAP_I3,
		   IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
			  (K_INT_PERF_CNT << 3)));

	/* Initialize address traps */
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));

	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));

	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
	bus_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));

	/* Initialize Trace Event 0-7 */
	//				when interrupt
	bus_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
	bus_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));

	/* Initialize Trace Sequence 0-7 */
	//				     Start on event 0 (interrupt)
	bus_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
		   IOADDR(A_SCD_TRACE_SEQUENCE_0));
	//			  dsamp when d used | asamp when a used
	bus_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
		   K_SCD_TRSEQ_TRIGGER_ALL,
		   IOADDR(A_SCD_TRACE_SEQUENCE_1));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
	bus_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));

	/* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
	bus_writeq((1ULL << K_INT_PERF_CNT),
		   IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));

	arm_tb();

	DBG(printk(DEVNAME ": done starting\n"));

	return 0;
}

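/*
 * Stop ZBbus profiling: clear tb_enable, wait for the interrupt handler
 * to disarm the trace buffer if a sample is still in flight, then free
 * both IRQs.
 */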
int sbprof_zbprof_stop(void)
{
	DEFINE_WAIT(wait);
	DBG(printk(DEVNAME ": stopping\n"));

	if (sbp.tb_enable) {
		sbp.tb_enable = 0;
		/* XXXKW there is a window here where the intr handler
		   may run, see the disable, and do the wake_up before
		   this sleep happens. */
		if (sbp.tb_armed) {
			DBG(printk(DEVNAME ": wait for disarm\n"));
			prepare_to_wait(&sbp.tb_sync, &wait, TASK_INTERRUPTIBLE);
			schedule();
			finish_wait(&sbp.tb_sync, &wait);
			DBG(printk(DEVNAME ": disarm complete\n"));
		}
		free_irq(K_INT_TRACE_FREEZE, &sbp);
		free_irq(K_INT_PERF_CNT, &sbp);
	}

	DBG(printk(DEVNAME ": done stopping\n"));

	return 0;
}

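/*
 * Only minor 0 exists, and only one opener is allowed at a time.  Open
 * resets the driver state and allocates the (zeroed) trace sample
 * buffer; release frees it again.
 */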
static int sbprof_tb_open(struct inode *inode, struct file *filp)
{
	int minor;

	minor = iminor(inode);
	if (minor != 0) {
		return -ENODEV;
	}
	if (sbp.open) {
		return -EBUSY;
	}

	memset(&sbp, 0, sizeof(struct sbprof_tb));
	sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
	if (!sbp.sbprof_tbbuf) {
		return -ENOMEM;
	}
	memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
	init_waitqueue_head(&sbp.tb_sync);
	init_waitqueue_head(&sbp.tb_read);
	sbp.open = 1;

	return 0;
}

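/*
 * Release: stop profiling if it is still armed or enabled, then free
 * the sample buffer and mark the device closed.
 */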
static int sbprof_tb_release(struct inode *inode, struct file *filp)
{
	int minor;

	minor = iminor(inode);
	if (minor != 0 || !sbp.open) {
		return -ENODEV;
	}

	if (sbp.tb_armed || sbp.tb_enable) {
		sbprof_zbprof_stop();
	}

	vfree(sbp.sbprof_tbbuf);
	sbp.open = 0;

	return 0;
}

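/*
 * Copy collected samples to userspace.  The file offset is interpreted
 * as a byte offset into the sequence of TB_SAMPLE_SIZE-byte samples;
 * reading stops at the last sample captured so far.
 */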
static ssize_t sbprof_tb_read(struct file *filp, char *buf,
			      size_t size, loff_t *offp)
{
	int cur_sample, sample_off, cur_count, sample_left;
	char *src;
	int   count   =  0;
	char *dest    =  buf;
	long  cur_off = *offp;

	cur_sample = cur_off / TB_SAMPLE_SIZE;
	sample_off = cur_off % TB_SAMPLE_SIZE;
	sample_left = TB_SAMPLE_SIZE - sample_off;
	while (size && (cur_sample < sbp.next_tb_sample)) {
		cur_count = size < sample_left ? size : sample_left;
		src = (char *)(((long)sbp.sbprof_tbbuf[cur_sample])+sample_off);
		if (copy_to_user(dest, src, cur_count)) {
			/* Return what was copied so far, or -EFAULT if nothing was */
			*offp = cur_off;
			return count ? count : -EFAULT;
		}
		DBG(printk(DEVNAME ": read from sample %d, %d bytes\n",
			   cur_sample, cur_count));
		size -= cur_count;
		sample_left -= cur_count;
		if (!sample_left) {
			cur_sample++;
			sample_off = 0;
			sample_left = TB_SAMPLE_SIZE;
		} else {
			sample_off += cur_count;
		}
		cur_off += cur_count;
		dest += cur_count;
		count += cur_count;
	}
	*offp = cur_off;

	return count;
}

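/*
 * ioctl interface: SBPROF_ZBSTART arms profiling, SBPROF_ZBSTOP stops
 * it, and SBPROF_ZBWAITFULL sleeps until the sample buffer is full and
 * reports whether it is via the user-supplied int pointer.
 */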
static int sbprof_tb_ioctl(struct inode *inode,
			   struct file *filp,
			   unsigned int command,
			   unsigned long arg)
{
	int error = 0;

	switch (command) {
	case SBPROF_ZBSTART:
		error = sbprof_zbprof_start(filp);
		break;
	case SBPROF_ZBSTOP:
		error = sbprof_zbprof_stop();
		break;
	case SBPROF_ZBWAITFULL: {
		/* Braces give the DEFINE_WAIT declaration its own scope;
		   a declaration may not directly follow a case label. */
		DEFINE_WAIT(wait);
		prepare_to_wait(&sbp.tb_read, &wait, TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&sbp.tb_read, &wait);
		/* XXXKW check if interrupted? */
		return put_user(TB_FULL, (int *) arg);
	}
	default:
		error = -EINVAL;
		break;
	}

	return error;
}

static struct file_operations sbprof_tb_fops = {
	.owner		= THIS_MODULE,
	.open		= sbprof_tb_open,
	.release	= sbprof_tb_release,
	.read		= sbprof_tb_read,
	.ioctl		= sbprof_tb_ioctl,
	.mmap		= NULL,
};

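/*
 * Module init/exit: register the character device under SBPROF_TB_MAJOR
 * and derive the default sampling period from the ZBbus clock rate.
 */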
static int __init sbprof_tb_init(void)
{
	if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
		printk(KERN_WARNING DEVNAME ": initialization failed (dev %d)\n",
		       SBPROF_TB_MAJOR);
		return -EIO;
	}
	sbp.open = 0;
	tb_period = zbbus_mhz * 10000LL;
	printk(KERN_INFO DEVNAME ": initialized - tb_period = %lld\n", tb_period);
	return 0;
}

static void __exit sbprof_tb_cleanup(void)
{
	unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
}

module_init(sbprof_tb_init);
module_exit(sbprof_tb_cleanup);