2 * Copyright (c) 2010-2011 Imre Deak <imre.deak@nokia.com>
3 * Copyright (c) 2010-2011 Luc Verhaegen <libv@codethink.co.uk>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * Debugfs interface living in pvr/ subdirectory.
25 #include <linux/kernel.h>
26 #include <linux/debugfs.h>
27 #include <linux/vmalloc.h>
28 #include <linux/mutex.h>
29 #include <linux/uaccess.h>
32 #include "img_types.h"
33 #include "servicesext.h"
35 #include "sgxinfokm.h"
36 #include "syscommon.h"
37 #include "pvr_bridge_km.h"
39 #include "pvr_debugfs.h"
41 #include "bridged_support.h"
43 #include "pvr_trace_cmd.h"
/*
 * NOTE(review): this listing is a sparse excerpt — embedded source line
 * numbers jump, so braces, returns and some statements are missing from
 * view.  Comments below are hedged accordingly; confirm against the full
 * source before acting on them.
 */
/* Root dentry of the "pvr" debugfs directory; created in pvr_debugfs_init(). */
45 struct dentry *pvr_debugfs_dir;
/*
 * Find the SGX device node in the global device list.
 * Presumably returns NULL when SysAcquireData() fails or when no SGX
 * node is found — the return paths fall outside this excerpt; verify.
 */
51 static struct PVRSRV_DEVICE_NODE *get_sgx_node(void)
53 struct SYS_DATA *sysdata;
54 struct PVRSRV_DEVICE_NODE *node;
56 if (SysAcquireData(&sysdata) != PVRSRV_OK)
59 for (node = sysdata->psDeviceNodeList; node; node = node->psNext)
60 if (node->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
/*
 * debugfs "reset_sgx" write handler: wakes SGX to D0, forces a HW
 * recovery reset, then re-evaluates the active-power event so the
 * device may power back down.  Error paths are outside this excerpt.
 */
66 static int pvr_debugfs_reset(void *data, u64 val)
68 struct PVRSRV_DEVICE_NODE *node;
69 enum PVRSRV_ERROR err;
/* Bail out while the driver is administratively disabled. */
77 if (pvr_is_disabled()) {
82 node = get_sgx_node();
/* Registers are only reachable in D0, so force the device up first. */
88 err = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
89 PVRSRV_POWER_STATE_D0);
90 if (err != PVRSRV_OK) {
95 HWRecoveryResetSGX(node, __func__);
97 SGXTestActivePowerEvent(node);
/*
 * Dispatch wrapper keyed on the attribute's backing variable; only the
 * pvr_reset case is visible here ("var" and "pvr_reset" are declared
 * outside this excerpt — confirm in the full source).
 */
104 static int pvr_debugfs_reset_wrapper(void *data, u64 val)
108 if (var == &pvr_reset)
109 return pvr_debugfs_reset(data, val);
/* Write-only attribute: NULL getter, wrapper as setter. */
116 DEFINE_SIMPLE_ATTRIBUTE(pvr_debugfs_reset_fops, NULL,
117 pvr_debugfs_reset_wrapper, "%llu\n");
119 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Microkernel trace: 512 entries, each formatted into at most 80 chars. */
123 #define SGXMK_TRACE_BUFFER_SIZE 512
124 #define SGXMK_TRACE_BUF_STR_LEN 80
/* Snapshot container; fields (len, data[]) are outside this excerpt. */
126 struct edm_buf_info {
/*
 * Format the EDM status buffer into dst (and mirror it to the kernel
 * log via printk).  Returns the number of bytes written, clamped to
 * dst_len.  Layout of the source buffer: one status word followed by
 * 512 four-word trace records (dumped in 2,3,1,0 order below).
 */
132 edm_trace_print(struct PVRSRV_SGXDEV_INFO *sdev, char *dst, size_t dst_len)
141 if (!sdev->psKernelEDMStatusBufferMemInfo)
144 buf = sdev->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
147 p += scnprintf(dst + p, dst_len - p,
148 "Last SGX microkernel status code: 0x%x\n", *buf);
150 printk(KERN_DEBUG "Last SGX microkernel status code: 0x%x\n",
157 buf_end = buf + SGXMK_TRACE_BUFFER_SIZE * 4;
161 /* Dump the status values */
162 for (i = 0; i < SGXMK_TRACE_BUFFER_SIZE; i++) {
164 p += scnprintf(dst + p, dst_len - p,
165 "%3d %08X %08X %08X %08X\n",
166 i, buf[2], buf[3], buf[1], buf[0]);
168 printk(KERN_DEBUG "%3d %08X %08X %08X %08X\n",
169 i, buf[2], buf[3], buf[1], buf[0]);
175 return p > dst_len ? dst_len : p;
/*
 * Allocate an edm_buf_info large enough for a full formatted trace and
 * fill it via edm_trace_print().  Caller frees with
 * pvr_edm_buffer_destroy().  Returns NULL on allocation failure
 * (error path partially outside this excerpt).
 */
178 static struct edm_buf_info *
179 pvr_edm_buffer_create(struct PVRSRV_SGXDEV_INFO *sgx_info)
181 struct edm_buf_info *bi;
184 /* Take a snapshot of the EDM trace buffer */
185 size = SGXMK_TRACE_BUFFER_SIZE * SGXMK_TRACE_BUF_STR_LEN;
186 bi = vmalloc(sizeof(*bi) + size);
188 pr_err("%s: vmalloc failed!\n", __func__);
192 bi->len = edm_trace_print(sgx_info, bi->data, size);
/* Counterpart of pvr_edm_buffer_create(); presumably vfree()s — confirm. */
198 pvr_edm_buffer_destroy(struct edm_buf_info *edm)
/* open: snapshot the trace into file->private_data for later reads. */
203 static int pvr_debugfs_edm_open(struct inode *inode, struct file *file)
205 struct PVRSRV_DEVICE_NODE *node;
207 node = get_sgx_node();
209 file->private_data = pvr_edm_buffer_create(node->pvDevice);
210 if (!file->private_data)
216 static int pvr_debugfs_edm_release(struct inode *inode, struct file *file)
218 pvr_debugfs_edm_release-comment-placeholder
239 #ifdef CONFIG_PVR_TRACE_CMD
/*
 * Command-trace snapshot state.  A single global snapshot is shared by
 * all readers; trcmd_open_cnt presumably serializes open() calls
 * (increment/decrement sites are outside this excerpt — confirm).
 */
241 static void *trcmd_str_buf;
242 static u8 *trcmd_snapshot;
243 static size_t trcmd_snapshot_size;
244 static int trcmd_open_cnt;
/* open: allocate the one-page string buffer and take a trace snapshot. */
246 static int pvr_dbg_trcmd_open(struct inode *inode, struct file *file)
255 trcmd_str_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
256 if (!trcmd_str_buf) {
264 r = pvr_trcmd_create_snapshot(&trcmd_snapshot, &trcmd_snapshot_size);
/* Snapshot failed: release the string buffer again. */
267 kfree(trcmd_str_buf);
278 static int pvr_dbg_trcmd_release(struct inode *inode, struct file *file)
280 pvr_trcmd_destroy_snapshot(trcmd_snapshot);
281 kfree(trcmd_str_buf);
/*
 * read: format part of the snapshot into trcmd_str_buf, then copy to
 * userspace.
 * NOTE(review): max_t(size_t, PAGE_SIZE, count) lets the print size
 * exceed the PAGE_SIZE allocation of trcmd_str_buf when count is
 * larger — this looks like it should be min_t; verify against
 * pvr_trcmd_print()'s contract.
 */
287 static ssize_t pvr_dbg_trcmd_read(struct file *file, char __user *buffer,
288 size_t count, loff_t *ppos)
292 ret = pvr_trcmd_print(trcmd_str_buf, max_t(size_t, PAGE_SIZE, count),
293 trcmd_snapshot, trcmd_snapshot_size, ppos);
294 if (copy_to_user(buffer, trcmd_str_buf, ret))
300 static const struct file_operations pvr_dbg_trcmd_fops = {
301 .owner = THIS_MODULE,
302 .open = pvr_dbg_trcmd_open,
303 .release = pvr_dbg_trcmd_release,
304 .read = pvr_dbg_trcmd_read,
/*
 * Common llseek implementation with an upper bound 'max'.
 * Visible cases: absolute seek bounds-check, relative seek (f_pos +
 * offset) bounds-check, and an end-relative case (max + offset).  The
 * whence dispatch itself is outside this excerpt.
 */
312 pvr_debugfs_llseek_helper(struct file *filp, loff_t offset, int whence,
319 if ((offset > max) || (offset < 0))
325 if (((filp->f_pos + offset) > max) ||
326 ((filp->f_pos + offset) < 0))
329 f_pos = filp->f_pos + offset;
336 f_pos = max + offset;
350 * One shot register dump.
352 * Only in D0 can we read all registers. Our driver currently only does either
353 * D0 or D3. In D3 any register read results in a SIGBUS. There is a possibility
354 * that in D1 or possibly D2 all registers apart from [0xA08:0xA4C] can be read.
/*
 * open: snapshot the first 1024 32-bit SGX registers into a freshly
 * allocated page so reads never touch live (possibly powered-down)
 * hardware.  Error/cleanup paths fall outside this excerpt.
 */
357 pvr_debugfs_regs_open(struct inode *inode, struct file *filp)
359 struct PVRSRV_DEVICE_NODE *node;
360 struct PVRSRV_SGXDEV_INFO *dev;
361 enum PVRSRV_ERROR error;
365 regs = (u32 *) __get_free_page(GFP_KERNEL);
371 if (pvr_is_disabled()) {
376 node = get_sgx_node();
381 dev = node->pvDevice;
/* Must be in D0 to read registers (see the block comment above). */
383 error = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
384 PVRSRV_POWER_STATE_D0);
385 if (error != PVRSRV_OK) {
390 for (i = 0; i < 1024; i++)
391 regs[i] = readl(dev->pvRegsBaseKM + 4 * i);
393 filp->private_data = regs;
/* Allow the device to power back down after the snapshot. */
395 SGXTestActivePowerEvent(node);
404 pvr_debugfs_regs_release(struct inode *inode, struct file *filp)
406 free_page((unsigned long) filp->private_data);
/* Each output line is "0xRRR 0xVVVVVVVV\n" = 17 bytes (incl. newline). */
411 #define REGS_DUMP_LINE_SIZE 17
412 #define REGS_DUMP_FORMAT "0x%03X 0x%08X\n"
415 pvr_debugfs_regs_llseek(struct file *filp, loff_t offset, int whence)
417 return pvr_debugfs_llseek_helper(filp, offset, whence,
418 1024 * REGS_DUMP_LINE_SIZE);
/*
 * read: format exactly one register line per call; requires the caller
 * to supply a buffer of at least one full line.
 */
422 pvr_debugfs_regs_read(struct file *filp, char __user *buf, size_t size,
425 char tmp[REGS_DUMP_LINE_SIZE + 1];
426 u32 *regs = filp->private_data;
429 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
/* Which register line does f_pos fall into? */
432 i = ((int) *f_pos) / (sizeof(tmp) - 1);
436 size = snprintf(tmp, sizeof(tmp), REGS_DUMP_FORMAT, i * 4, regs[i]);
/* Copy from the intra-line offset so partial-line positions work. */
439 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
449 static const struct file_operations pvr_debugfs_regs_fops = {
450 .owner = THIS_MODULE,
451 .llseek = pvr_debugfs_regs_llseek,
452 .read = pvr_debugfs_regs_read,
453 .open = pvr_debugfs_regs_open,
454 .release = pvr_debugfs_regs_release,
460 * HW Recovery dumping support.
/* Single-element array so callers can pass hwrec_mutex without '&'. */
463 static struct mutex hwrec_mutex[1];
464 static struct timeval hwrec_time;
465 static int hwrec_open_count;
466 static DECLARE_WAIT_QUEUE_HEAD(hwrec_wait_queue);
467 static int hwrec_event;
469 /* add extra locking to keep us from overwriting things during dumping. */
470 static int hwrec_event_open_count;
471 static int hwrec_event_file_lock;
473 /* While these could get moved into PVRSRV_SGXDEV_INFO, the more future-proof
474 * way of handling hw recovery events is by providing 1 single hwrecovery dump
475 * at a time, and adding a hwrec_info debugfs file with: process information,
476 * general driver information, and the instance of the (then multicore) pvr
477 * where the hwrec event happened.
/* One page holding a 1024-word register snapshot taken at HWrec time. */
479 static u32 *hwrec_registers;
481 #ifdef CONFIG_PVR_DEBUG
482 static size_t hwrec_mem_size;
/*
 * NOTE(review): HWREC_MEM_PAGES expands to 4 * PAGE_SIZE and is used as
 * an element count for the page-pointer array below — that makes the
 * static array 16K entries on 4K pages.  If the intent was "4 pages
 * worth", this is a very large over-allocation; verify.
 */
483 #define HWREC_MEM_PAGES (4 * PAGE_SIZE)
484 static unsigned long hwrec_mem_pages[HWREC_MEM_PAGES];
485 #endif /* CONFIG_PVR_DEBUG */
487 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* EDM trace snapshot taken just before the HW recovery reset. */
488 static struct edm_buf_info *hwrec_edm_buf;
/*
 * Snapshot the first 1024 SGX registers into hwrec_registers,
 * allocating the backing page lazily on first use.
 */
492 hwrec_registers_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
496 if (!hwrec_registers) {
497 hwrec_registers = (u32 *) __get_free_page(GFP_KERNEL);
498 if (!hwrec_registers) {
499 pr_err("%s: failed to get free page.\n", __func__);
504 for (i = 0; i < 1024; i++)
505 hwrec_registers[i] = readl(psDevInfo->pvRegsBaseKM + 4 * i);
/* Free every page currently accounted for in *size, then reset it. */
509 hwrec_pages_free(size_t *size, unsigned long *pages)
516 for (i = 0; (i * PAGE_SIZE) < *size; i++) {
/*
 * Append 'size' bytes to a page-array backed stream, allocating pages
 * on demand.  Writes at most one page worth per call (count is clamped
 * to the space left in the current page); presumably the caller loops —
 * confirm against the full source.
 */
525 hwrec_pages_write(u8 *buffer, size_t size, size_t *current_size,
526 unsigned long *pages, int array_size)
532 size_t offset = *current_size & ~PAGE_MASK;
533 int page = *current_size / PAGE_SIZE;
/* Refuse to write past the end of the page-pointer array. */
536 if (((*current_size) / PAGE_SIZE) >= array_size) {
537 pr_err("%s: Size overrun!\n", __func__);
541 pages[page] = __get_free_page(GFP_KERNEL);
543 pr_err("%s: failed to get free page.\n",
549 if (count > (PAGE_SIZE - offset))
550 count = PAGE_SIZE - offset;
552 memcpy(((u8 *) pages[page]) + offset, buffer, count);
557 *current_size += count;
563 #ifdef CONFIG_PVR_DEBUG
/* Thin wrappers binding the generic page helpers to the hwrec_mem_* state. */
567 hwrec_pages_free(&hwrec_mem_size, hwrec_mem_pages);
571 hwrec_mem_write(u8 *buffer, size_t size)
573 return hwrec_pages_write(buffer, size, &hwrec_mem_size,
574 hwrec_mem_pages, ARRAY_SIZE(hwrec_mem_pages));
/* printf-style front end for hwrec_mem_write(); 'tmp' declared off-screen. */
578 hwrec_mem_print(char *format, ...)
584 va_start(ap, format);
585 size = vscnprintf(tmp, sizeof(tmp), format, ap);
588 return hwrec_mem_write(tmp, size);
590 #endif /* CONFIG_PVR_DEBUG */
593 * Render status buffer dumping.
/* Up to 1024 pages of render-state dump, streamed via hwrec_pages_write(). */
595 static size_t hwrec_status_size;
596 static unsigned long hwrec_status_pages[1024];
599 hwrec_status_write(char *buffer, size_t size)
601 return hwrec_pages_write(buffer, size, &hwrec_status_size,
603 ARRAY_SIZE(hwrec_status_pages));
607 hwrec_status_free(void)
609 hwrec_pages_free(&hwrec_status_size, hwrec_status_pages);
/* printf-style front end for hwrec_status_write(); 'tmp' declared off-screen. */
613 hwrec_status_print(char *format, ...)
619 va_start(ap, format);
620 size = vscnprintf(tmp, sizeof(tmp), format, ap);
623 return hwrec_status_write(tmp, size);
/*
 * NOTE(review): (1 << 31) left-shifts into the sign bit of int, which
 * is undefined behavior in C; (1U << 31) would be the safe spelling.
 * The value is OR-ed into a u32 'type' field below, so unsigned is
 * what's wanted anyway.
 */
626 #define BUF_DESC_CORRUPT (1 << 31)
/*
 * Merge entries of 'src' into 'dst', deduplicating by buf_id.  If two
 * entries share a buf_id but differ otherwise, the dst entry is marked
 * corrupt rather than replaced.
 */
628 static void add_uniq_items(struct render_state_buf_list *dst,
629 const struct render_state_buf_list *src)
633 for (i = 0; i < src->cnt; i++) {
634 const struct render_state_buf_info *sbinf = &src->info[i];
637 for (j = 0; j < dst->cnt; j++) {
638 if (sbinf->buf_id == dst->info[j].buf_id) {
639 if (memcmp(sbinf, &dst->info[j],
641 dst->info[j].type |= BUF_DESC_CORRUPT;
646 /* Bound for cnt is guaranteed by the caller */
647 dst->info[dst->cnt] = *sbinf;
/*
 * Allocate a list sized for the sum of all input lists, then merge the
 * inputs into it via add_uniq_items().  NULL entries in bl_set are
 * presumably skipped (the skip checks are outside this excerpt).
 * Caller owns (kfree's) the returned list.
 */
653 static struct render_state_buf_list *create_merged_uniq_list(
654 struct render_state_buf_list **bl_set, int set_size)
657 struct render_state_buf_list *dbl;
661 * Create a buf list big enough to contain all elements from each
664 size = offsetof(struct render_state_buf_list, info[0]);
665 for (i = 0; i < set_size; i++) {
668 size += bl_set[i]->cnt * sizeof(bl_set[i]->info[0]);
672 dbl = kmalloc(size, GFP_KERNEL);
677 for (i = 0; i < set_size; i++) {
679 add_uniq_items(dbl, bl_set[i]);
/*
 * Map a userspace-owned buffer (identified by a services handle) into
 * kernel virtual address space.  Fast path: if the mem-info already has
 * a kernel linear address, return it directly (no vmap, so vunmap_buf
 * below must make the same distinction).  Slow path: resolve the OS
 * mem handle, collect the backing pages and vmap them.
 */
685 static void *vmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
686 u32 handle, off_t offset, size_t size)
688 struct PVRSRV_KERNEL_MEM_INFO *minfo;
689 struct LinuxMemArea *mem_area;
690 enum PVRSRV_ERROR err;
/* NOTE(review): this rejects offsets with bits in PAGE_MASK — i.e. it
 * appears to require offset < PAGE_SIZE, not page alignment; verify the
 * intent (the usual alignment check would be offset & ~PAGE_MASK). */
698 if (offset & PAGE_MASK)
701 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
702 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
703 if (err != PVRSRV_OK)
705 if (minfo->pvLinAddrKM)
706 return minfo->pvLinAddrKM;
708 err = PVRSRVLookupOSMemHandle(proc->psHandleBase, (void *)&mem_area,
710 if (err != PVRSRV_OK)
713 start_ofs = offset & PAGE_MASK;
714 end_ofs = PAGE_ALIGN(offset + size);
715 pg_cnt = (end_ofs - start_ofs) >> PAGE_SHIFT;
716 pages = kmalloc(pg_cnt * sizeof(pages[0]), GFP_KERNEL);
719 for (i = 0; i < pg_cnt; i++) {
722 pfn = LinuxMemAreaToCpuPFN(mem_area, start_ofs);
725 pages[i] = pfn_to_page(pfn);
726 start_ofs += PAGE_SIZE;
728 map = vmap(pages, pg_cnt, VM_MAP, PAGE_KERNEL);
/*
 * Undo vmap_buf(): if the mem-info had a kernel linear address, nothing
 * was mapped and nothing is unmapped; otherwise vunmap the page-aligned
 * base of the mapping.
 */
736 static void vunmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
737 u32 handle, void *map)
739 struct PVRSRV_KERNEL_MEM_INFO *minfo;
740 enum PVRSRV_ERROR err;
742 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
743 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
744 if (err != PVRSRV_OK)
746 if (minfo->pvLinAddrKM)
748 vunmap((void *)(((unsigned long)map) & PAGE_MASK));
/*
 * Emit one buffer into the hwrec status stream: a header line followed
 * by the raw contents.
 * NOTE(review): "%d" is used for a size_t argument — "%zu" would be the
 * correct specifier; verify 'size' really is size_t at this call site.
 */
751 static void dump_buf(void *start, size_t size, u32 type)
755 if (type & BUF_DESC_CORRUPT) {
756 type &= ~BUF_DESC_CORRUPT;
759 hwrec_status_print("<type %d%s size %d>\n", type, corr, size);
760 hwrec_status_write(start, size);
/*
 * Resolve a per-process handle + offset to an in-kernel
 * render_state_buf_list pointer, validating that the list header fits
 * inside the containing allocation and that its element count is sane.
 * Returns NULL (presumably — returns outside this excerpt) on any
 * validation failure.
 */
763 static struct render_state_buf_list *get_state_buf_list(
764 struct PVRSRV_PER_PROCESS_DATA *proc,
765 u32 handle, off_t offset)
767 struct PVRSRV_KERNEL_MEM_INFO *container;
768 struct render_state_buf_list *buf;
769 enum PVRSRV_ERROR err;
771 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&container,
772 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
773 if (err != PVRSRV_OK)
775 if (!container->pvLinAddrKM)
/* Header must fit within the allocation. */
777 if (offset + sizeof(*buf) > container->ui32AllocSize)
780 buf = container->pvLinAddrKM + offset;
/* Reject a count larger than the info[] array can hold. */
782 if (buf->cnt > ARRAY_SIZE(buf->info))
/* Map, dump and unmap each buffer referenced by the list. */
788 static void dump_state_buf_list(struct PVRSRV_PER_PROCESS_DATA *proc,
789 struct render_state_buf_list *bl)
796 pr_info("Dumping %d render state buffers\n", bl->cnt);
797 for (i = 0; i < bl->cnt; i++) {
798 struct render_state_buf_info *binfo;
801 binfo = &bl->info[i];
803 map = vmap_buf(proc, binfo->buf_id, binfo->offset, binfo->size);
806 dump_buf(map, binfo->size, binfo->type);
808 vunmap_buf(proc, binfo->buf_id, map);
/*
 * Dump the render-state buffers referenced by the SGX host control
 * block at the time of the HW recovery: fetch the TA and 3D list
 * handles, merge the (deduplicated) lists, and dump the result.
 */
812 static void dump_sgx_state_bufs(struct PVRSRV_PER_PROCESS_DATA *proc,
813 struct PVRSRV_SGXDEV_INFO *dev_info)
815 struct SGXMKIF_HOST_CTL __iomem *hctl = dev_info->psSGXHostCtl;
816 struct render_state_buf_list *bl_set[2] = { NULL };
817 struct render_state_buf_list *mbl;
824 handle_ta = readl(&hctl->render_state_buf_ta_handle);
825 handle_3d = readl(&hctl->render_state_buf_3d_handle);
826 bl_set[0] = get_state_buf_list(proc, handle_ta,
827 dev_info->state_buf_ofs);
829 * The two buf list can be the same if the TA and 3D phases used the
830 * same context at the time of the HWrec. In this case just ignore
833 if (handle_ta != handle_3d)
834 bl_set[1] = get_state_buf_list(proc, handle_3d,
835 dev_info->state_buf_ofs);
836 mbl = create_merged_uniq_list(bl_set, ARRAY_SIZE(bl_set));
840 dump_state_buf_list(proc, mbl);
/*
 * Entry point called on a HW recovery event: take all configured dumps
 * (registers, MMU/memory, EDM trace, render-state buffers) under
 * hwrec_mutex, then wake any reader blocked on hwrec_event.  A dump
 * still held open by userspace (hwrec_open_count /
 * hwrec_event_file_lock) must not be overwritten, so the new dump is
 * refused in that case.
 */
845 pvr_hwrec_dump(struct PVRSRV_PER_PROCESS_DATA *proc_data,
846 struct PVRSRV_SGXDEV_INFO *psDevInfo)
848 mutex_lock(hwrec_mutex);
850 if (hwrec_open_count || hwrec_event_file_lock) {
851 pr_err("%s: previous hwrec dump is still locked!\n", __func__);
852 mutex_unlock(hwrec_mutex);
/* Timestamp identifies this dump for the hwrec_time debugfs file. */
856 do_gettimeofday(&hwrec_time);
857 pr_info("HW Recovery dump generated at %010ld%06ld\n",
858 hwrec_time.tv_sec, hwrec_time.tv_usec);
860 hwrec_registers_dump(psDevInfo);
862 #ifdef CONFIG_PVR_DEBUG
864 mmu_hwrec_mem_dump(psDevInfo);
865 #endif /* CONFIG_PVR_DEBUG */
867 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Replace any previous EDM snapshot with one for this event. */
869 pvr_edm_buffer_destroy(hwrec_edm_buf);
870 hwrec_edm_buf = pvr_edm_buffer_create(psDevInfo);
874 dump_sgx_state_bufs(proc_data, psDevInfo);
878 mutex_unlock(hwrec_mutex);
880 wake_up_interruptible(&hwrec_wait_queue);
/*
 * Shared open/release for the hwrec data files; presumably they adjust
 * hwrec_open_count under the mutex (the count updates fall outside
 * this excerpt — confirm).
 */
887 hwrec_file_open(struct inode *inode, struct file *filp)
889 mutex_lock(hwrec_mutex);
893 mutex_unlock(hwrec_mutex);
898 hwrec_file_release(struct inode *inode, struct file *filp)
900 mutex_lock(hwrec_mutex);
904 mutex_unlock(hwrec_mutex);
909 * Provides a hwrec timestamp for unique dumping.
912 hwrec_time_read(struct file *filp, char __user *buf, size_t size,
917 mutex_lock(hwrec_mutex);
/* Same format as the pr_info in pvr_hwrec_dump: seconds then usecs. */
918 snprintf(tmp, sizeof(tmp), "%010ld%06ld",
919 hwrec_time.tv_sec, hwrec_time.tv_usec);
920 mutex_unlock(hwrec_mutex);
922 return simple_read_from_buffer(buf, size, f_pos, tmp, strlen(tmp));
925 static const struct file_operations hwrec_time_fops = {
926 .owner = THIS_MODULE,
928 .read = hwrec_time_read,
929 .open = hwrec_file_open,
930 .release = hwrec_file_release,
934 * Blocks the reader until a HWRec happens.
/* Only one hwrec_event reader at a time. */
937 hwrec_event_open(struct inode *inode, struct file *filp)
941 mutex_lock(hwrec_mutex);
943 if (hwrec_event_open_count)
946 hwrec_event_open_count++;
950 mutex_unlock(hwrec_mutex);
956 hwrec_event_release(struct inode *inode, struct file *filp)
958 mutex_lock(hwrec_mutex);
960 hwrec_event_open_count--;
962 mutex_unlock(hwrec_mutex);
/*
 * Drop the file lock, sleep until pvr_hwrec_dump() raises hwrec_event,
 * then retake the lock so the fresh dump cannot be overwritten while
 * the reader processes it.
 */
969 hwrec_event_read(struct file *filp, char __user *buf, size_t size,
974 mutex_lock(hwrec_mutex);
976 hwrec_event_file_lock = 0;
978 mutex_unlock(hwrec_mutex);
980 ret = wait_event_interruptible(hwrec_wait_queue, hwrec_event);
982 mutex_lock(hwrec_mutex);
985 hwrec_event_file_lock = 1;
987 mutex_unlock(hwrec_mutex);
993 static const struct file_operations hwrec_event_fops = {
994 .owner = THIS_MODULE,
996 .read = hwrec_event_read,
997 .open = hwrec_event_open,
998 .release = hwrec_event_release,
1002 * Reads out all readable registers.
/* Seekable only once a register snapshot exists. */
1005 hwrec_regs_llseek(struct file *filp, loff_t offset, int whence)
1009 mutex_lock(hwrec_mutex);
1011 if (hwrec_registers)
1012 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1013 1024 * REGS_DUMP_LINE_SIZE);
1017 mutex_unlock(hwrec_mutex);
/*
 * Same one-line-per-call scheme as pvr_debugfs_regs_read(), but
 * reading from the hwrec snapshot under the mutex.
 */
1023 hwrec_regs_read(struct file *filp, char __user *buf, size_t size,
1026 char tmp[REGS_DUMP_LINE_SIZE + 1];
1029 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
1032 i = ((int) *f_pos) / (sizeof(tmp) - 1);
1036 mutex_lock(hwrec_mutex);
1038 if (!hwrec_registers)
1041 size = snprintf(tmp, sizeof(tmp), REGS_DUMP_FORMAT, i * 4,
1042 hwrec_registers[i]);
1044 mutex_unlock(hwrec_mutex);
1047 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
1057 static const struct file_operations hwrec_regs_fops = {
1058 .owner = THIS_MODULE,
1059 .llseek = hwrec_regs_llseek,
1060 .read = hwrec_regs_read,
1061 .open = hwrec_file_open,
1062 .release = hwrec_file_release,
1065 #ifdef CONFIG_PVR_DEBUG
1067 * Provides a full context dump: page directory, page tables, and all mapped
1071 hwrec_mem_llseek(struct file *filp, loff_t offset, int whence)
1075 mutex_lock(hwrec_mutex);
1078 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1083 mutex_unlock(hwrec_mutex);
/*
 * Copy out of the hwrec_mem page array; clamps the request to the end
 * of the dump and to the current page boundary, so callers may need
 * multiple reads.
 */
1089 hwrec_mem_read(struct file *filp, char __user *buf, size_t size,
1092 mutex_lock(hwrec_mutex);
1094 if ((*f_pos >= 0) && (*f_pos < hwrec_mem_size)) {
1097 size = min(size, (size_t) hwrec_mem_size - (size_t) *f_pos);
1099 page = (*f_pos) / PAGE_SIZE;
1100 offset = (*f_pos) & ~PAGE_MASK;
1102 size = min(size, (size_t) PAGE_SIZE - offset);
1104 if (copy_to_user(buf,
1105 ((u8 *) hwrec_mem_pages[page]) + offset,
1107 mutex_unlock(hwrec_mutex);
1113 mutex_unlock(hwrec_mutex);
1119 static const struct file_operations hwrec_mem_fops = {
1120 .owner = THIS_MODULE,
1121 .llseek = hwrec_mem_llseek,
1122 .read = hwrec_mem_read,
1123 .open = hwrec_file_open,
1124 .release = hwrec_file_release,
1126 #endif /* CONFIG_PVR_DEBUG */
1129 * Read out edm trace created before HW recovery reset.
1131 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Seek is bounded by the snapshot length; the NULL-snapshot guard is
 * presumably between these lines (outside this excerpt). */
1133 hwrec_edm_llseek(struct file *filp, loff_t offset, int whence)
1137 mutex_lock(hwrec_mutex);
1140 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1141 hwrec_edm_buf->len);
1145 mutex_unlock(hwrec_mutex);
1151 hwrec_edm_read(struct file *filp, char __user *buf, size_t size,
1156 mutex_lock(hwrec_mutex);
1159 ret = simple_read_from_buffer(buf, size, f_pos,
1160 hwrec_edm_buf->data,
1161 hwrec_edm_buf->len);
1165 mutex_unlock(hwrec_mutex);
1170 static const struct file_operations hwrec_edm_fops = {
1171 .owner = THIS_MODULE,
1172 .llseek = hwrec_edm_llseek,
1173 .read = hwrec_edm_read,
1174 .open = hwrec_file_open,
1175 .release = hwrec_file_release,
1177 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
1180 * Provides a dump of the TA and 3D status buffers.
1183 hwrec_status_llseek(struct file *filp, loff_t offset, int whence)
1187 mutex_lock(hwrec_mutex);
1189 if (hwrec_status_size)
1190 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1195 mutex_unlock(hwrec_mutex);
/* Mirrors hwrec_mem_read(): page-array copy-out, clamped per page. */
1201 hwrec_status_read(struct file *filp, char __user *buf, size_t size,
1204 mutex_lock(hwrec_mutex);
1206 if ((*f_pos >= 0) && (*f_pos < hwrec_status_size)) {
1209 size = min(size, (size_t) hwrec_status_size - (size_t) *f_pos);
1211 page = (*f_pos) / PAGE_SIZE;
1212 offset = (*f_pos) & ~PAGE_MASK;
1214 size = min(size, (size_t) PAGE_SIZE - offset);
1216 if (copy_to_user(buf,
1217 ((u8 *) hwrec_status_pages[page]) + offset,
1219 mutex_unlock(hwrec_mutex);
1225 mutex_unlock(hwrec_mutex);
1231 static const struct file_operations hwrec_status_fops = {
1232 .owner = THIS_MODULE,
1233 .llseek = hwrec_status_llseek,
1234 .read = hwrec_status_read,
1235 .open = hwrec_file_open,
1236 .release = hwrec_file_release,
/*
 * Create the pvr/ debugfs directory and all files.  On any failure the
 * whole directory is torn down and (presumably) an error is returned.
 * NOTE(review): error paths mix debugfs_remove() (lines 1252, 1273)
 * with debugfs_remove_recursive() — by the time "registers" creation
 * fails, earlier files already exist in the directory, so the
 * non-recursive remove at line 1273 looks wrong; verify.
 */
1242 int pvr_debugfs_init(void)
1244 mutex_init(hwrec_mutex);
1246 pvr_debugfs_dir = debugfs_create_dir("pvr", NULL);
1247 if (!pvr_debugfs_dir)
1250 if (!debugfs_create_file("reset_sgx", S_IWUSR, pvr_debugfs_dir,
1251 &pvr_reset, &pvr_debugfs_reset_fops)) {
1252 debugfs_remove(pvr_debugfs_dir);
1256 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1257 if (!debugfs_create_file("edm_trace", S_IRUGO, pvr_debugfs_dir, NULL,
1258 &pvr_debugfs_edm_fops)) {
1259 debugfs_remove_recursive(pvr_debugfs_dir);
1263 #ifdef CONFIG_PVR_TRACE_CMD
1264 if (!debugfs_create_file("command_trace", S_IRUGO, pvr_debugfs_dir,
1265 NULL, &pvr_dbg_trcmd_fops)) {
1266 debugfs_remove_recursive(pvr_debugfs_dir);
1271 if (!debugfs_create_file("registers", S_IRUSR, pvr_debugfs_dir, NULL,
1272 &pvr_debugfs_regs_fops)) {
1273 debugfs_remove(pvr_debugfs_dir);
1277 if (!debugfs_create_file("hwrec_event", S_IRUSR, pvr_debugfs_dir, NULL,
1278 &hwrec_event_fops)) {
1279 debugfs_remove_recursive(pvr_debugfs_dir);
1283 if (!debugfs_create_file("hwrec_time", S_IRUSR, pvr_debugfs_dir, NULL,
1284 &hwrec_time_fops)) {
1285 debugfs_remove_recursive(pvr_debugfs_dir);
1289 if (!debugfs_create_file("hwrec_regs", S_IRUSR, pvr_debugfs_dir, NULL,
1290 &hwrec_regs_fops)) {
1291 debugfs_remove_recursive(pvr_debugfs_dir);
1295 #ifdef CONFIG_PVR_DEBUG
1296 if (!debugfs_create_file("hwrec_mem", S_IRUSR, pvr_debugfs_dir, NULL,
1298 debugfs_remove_recursive(pvr_debugfs_dir);
1301 #endif /* CONFIG_PVR_DEBUG */
1303 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1304 if (!debugfs_create_file("hwrec_edm", S_IRUSR, pvr_debugfs_dir, NULL,
1306 debugfs_remove_recursive(pvr_debugfs_dir);
1311 if (!debugfs_create_file("hwrec_status", S_IRUSR, pvr_debugfs_dir, NULL,
1312 &hwrec_status_fops)) {
1313 debugfs_remove_recursive(pvr_debugfs_dir);
/*
 * Tear down the debugfs tree and release every dump buffer that may
 * still be held (register snapshot, mem dump pages, EDM snapshot,
 * status pages).
 * NOTE(review): free_page((u32) hwrec_registers) truncates the pointer
 * on 64-bit builds — the cast should be to unsigned long.  Harmless
 * only if this driver is strictly 32-bit; verify.
 */
1320 void pvr_debugfs_cleanup(void)
1322 debugfs_remove_recursive(pvr_debugfs_dir);
1324 if (hwrec_registers)
1325 free_page((u32) hwrec_registers)
1327 #ifdef CONFIG_PVR_DEBUG
1329 #endif /* CONFIG_PVR_DEBUG */
1331 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1333 pvr_edm_buffer_destroy(hwrec_edm_buf);
1336 hwrec_status_free();