/*
 * Copyright (c) 2010-2011 Imre Deak <imre.deak@nokia.com>
 * Copyright (c) 2010-2011 Luc Verhaegen <libv@codethink.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Debugfs interface living in pvr/ subdirectory.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "img_types.h"
#include "servicesext.h"
#include "sgxinfokm.h"
#include "syscommon.h"
#include "pvr_bridge_km.h"
#include "pvr_debugfs.h"
#include "bridged_support.h"
#include "pvr_trace_cmd.h"
46 struct dentry *pvr_debugfs_dir;
52 static struct PVRSRV_DEVICE_NODE *get_sgx_node(void)
54 struct SYS_DATA *sysdata;
55 struct PVRSRV_DEVICE_NODE *node;
57 if (SysAcquireData(&sysdata) != PVRSRV_OK)
60 for (node = sysdata->psDeviceNodeList; node; node = node->psNext)
61 if (node->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
67 static int pvr_debugfs_reset(void *data, u64 val)
69 struct PVRSRV_DEVICE_NODE *node;
70 enum PVRSRV_ERROR err;
78 if (pvr_is_disabled()) {
83 node = get_sgx_node();
89 err = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
90 PVRSRV_POWER_STATE_D0);
91 if (err != PVRSRV_OK) {
96 HWRecoveryResetSGX(node, __func__);
98 SGXTestActivePowerEvent(node);
105 static int pvr_debugfs_reset_wrapper(void *data, u64 val)
109 if (var == &pvr_reset)
110 return pvr_debugfs_reset(data, val);
117 DEFINE_SIMPLE_ATTRIBUTE(pvr_debugfs_reset_fops, NULL,
118 pvr_debugfs_reset_wrapper, "%llu\n");
120 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
124 #define SGXMK_TRACE_BUFFER_SIZE 512
125 #define SGXMK_TRACE_BUF_STR_LEN 80
127 struct edm_buf_info {
133 edm_trace_print(struct PVRSRV_SGXDEV_INFO *sdev, char *dst, size_t dst_len)
142 if (!sdev->psKernelEDMStatusBufferMemInfo)
145 buf = sdev->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
148 p += scnprintf(dst + p, dst_len - p,
149 "Last SGX microkernel status code: 0x%x\n", *buf);
151 printk(KERN_DEBUG "Last SGX microkernel status code: 0x%x\n",
158 buf_end = buf + SGXMK_TRACE_BUFFER_SIZE * 4;
162 /* Dump the status values */
163 for (i = 0; i < SGXMK_TRACE_BUFFER_SIZE; i++) {
165 p += scnprintf(dst + p, dst_len - p,
166 "%3d %08X %08X %08X %08X\n",
167 i, buf[2], buf[3], buf[1], buf[0]);
169 printk(KERN_DEBUG "%3d %08X %08X %08X %08X\n",
170 i, buf[2], buf[3], buf[1], buf[0]);
176 return p > dst_len ? dst_len : p;
179 static struct edm_buf_info *
180 pvr_edm_buffer_create(struct PVRSRV_SGXDEV_INFO *sgx_info)
182 struct edm_buf_info *bi;
185 /* Take a snapshot of the EDM trace buffer */
186 size = SGXMK_TRACE_BUFFER_SIZE * SGXMK_TRACE_BUF_STR_LEN;
187 bi = vmalloc(sizeof(*bi) + size);
189 pr_err("%s: vmalloc failed!\n", __func__);
193 bi->len = edm_trace_print(sgx_info, bi->data, size);
199 pvr_edm_buffer_destroy(struct edm_buf_info *edm)
204 static int pvr_debugfs_edm_open(struct inode *inode, struct file *file)
206 struct PVRSRV_DEVICE_NODE *node;
208 node = get_sgx_node();
210 file->private_data = pvr_edm_buffer_create(node->pvDevice);
211 if (!file->private_data)
217 static int pvr_debugfs_edm_release(struct inode *inode, struct file *file)
219 pvr_edm_buffer_destroy(file->private_data);
224 static ssize_t pvr_debugfs_edm_read(struct file *file, char __user *buffer,
225 size_t count, loff_t *ppos)
227 struct edm_buf_info *bi = file->private_data;
229 return simple_read_from_buffer(buffer, count, ppos, bi->data, bi->len);
232 static const struct file_operations pvr_debugfs_edm_fops = {
233 .owner = THIS_MODULE,
234 .open = pvr_debugfs_edm_open,
235 .read = pvr_debugfs_edm_read,
236 .release = pvr_debugfs_edm_release,
238 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
240 #ifdef CONFIG_PVR_TRACE_CMD
242 static void *trcmd_str_buf;
243 static u8 *trcmd_snapshot;
244 static size_t trcmd_snapshot_size;
245 static int trcmd_open_cnt;
247 static int pvr_dbg_trcmd_open(struct inode *inode, struct file *file)
256 trcmd_str_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
257 if (!trcmd_str_buf) {
265 r = pvr_trcmd_create_snapshot(&trcmd_snapshot, &trcmd_snapshot_size);
268 kfree(trcmd_str_buf);
279 static int pvr_dbg_trcmd_release(struct inode *inode, struct file *file)
281 pvr_trcmd_destroy_snapshot(trcmd_snapshot);
282 kfree(trcmd_str_buf);
288 static ssize_t pvr_dbg_trcmd_read(struct file *file, char __user *buffer,
289 size_t count, loff_t *ppos)
293 ret = pvr_trcmd_print(trcmd_str_buf, max_t(size_t, PAGE_SIZE, count),
294 trcmd_snapshot, trcmd_snapshot_size, ppos);
295 if (copy_to_user(buffer, trcmd_str_buf, ret))
301 static const struct file_operations pvr_dbg_trcmd_fops = {
302 .owner = THIS_MODULE,
303 .open = pvr_dbg_trcmd_open,
304 .release = pvr_dbg_trcmd_release,
305 .read = pvr_dbg_trcmd_read,
313 pvr_debugfs_llseek_helper(struct file *filp, loff_t offset, int whence,
320 if ((offset > max) || (offset < 0))
326 if (((filp->f_pos + offset) > max) ||
327 ((filp->f_pos + offset) < 0))
330 f_pos = filp->f_pos + offset;
337 f_pos = max + offset;
/*
 * One shot register dump.
 *
 * Only in D0 can we read all registers. Our driver currently only does either
 * D0 or D3. In D3 any register read results in a SIGBUS. There is a possibility
 * that in D1 or possibly D2 all registers apart from [0xA08:0xA4C] can be read.
 */
358 pvr_debugfs_regs_open(struct inode *inode, struct file *filp)
360 struct PVRSRV_DEVICE_NODE *node;
361 struct PVRSRV_SGXDEV_INFO *dev;
362 enum PVRSRV_ERROR error;
366 regs = (u32 *) __get_free_page(GFP_KERNEL);
372 if (pvr_is_disabled()) {
377 node = get_sgx_node();
382 dev = node->pvDevice;
384 error = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
385 PVRSRV_POWER_STATE_D0);
386 if (error != PVRSRV_OK) {
391 for (i = 0; i < 1024; i++)
392 regs[i] = readl(dev->pvRegsBaseKM + 4 * i);
394 filp->private_data = regs;
396 SGXTestActivePowerEvent(node);
405 pvr_debugfs_regs_release(struct inode *inode, struct file *filp)
407 free_page((unsigned long) filp->private_data);
412 #define REGS_DUMP_LINE_SIZE 17
413 #define REGS_DUMP_FORMAT "0x%03X 0x%08X\n"
416 pvr_debugfs_regs_llseek(struct file *filp, loff_t offset, int whence)
418 return pvr_debugfs_llseek_helper(filp, offset, whence,
419 1024 * REGS_DUMP_LINE_SIZE);
423 pvr_debugfs_regs_read(struct file *filp, char __user *buf, size_t size,
426 char tmp[REGS_DUMP_LINE_SIZE + 1];
427 u32 *regs = filp->private_data;
430 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
433 i = ((int) *f_pos) / (sizeof(tmp) - 1);
437 size = snprintf(tmp, sizeof(tmp), REGS_DUMP_FORMAT, i * 4, regs[i]);
440 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
450 static const struct file_operations pvr_debugfs_regs_fops = {
451 .owner = THIS_MODULE,
452 .llseek = pvr_debugfs_regs_llseek,
453 .read = pvr_debugfs_regs_read,
454 .open = pvr_debugfs_regs_open,
455 .release = pvr_debugfs_regs_release,
461 * HW Recovery dumping support.
464 static struct mutex hwrec_mutex[1];
465 static struct timeval hwrec_time;
466 static int hwrec_open_count;
467 static DECLARE_WAIT_QUEUE_HEAD(hwrec_wait_queue);
468 static int hwrec_event;
470 /* add extra locking to keep us from overwriting things during dumping. */
471 static int hwrec_event_open_count;
472 static int hwrec_event_file_lock;
474 /* While these could get moved into PVRSRV_SGXDEV_INFO, the more future-proof
475 * way of handling hw recovery events is by providing 1 single hwrecovery dump
476 * at a time, and adding a hwrec_info debugfs file with: process information,
477 * general driver information, and the instance of the (then multicore) pvr
478 * where the hwrec event happened.
480 static u32 *hwrec_registers;
482 #ifdef CONFIG_PVR_DEBUG
483 static size_t hwrec_mem_size;
484 #define HWREC_MEM_PAGES (4 * PAGE_SIZE)
485 static unsigned long hwrec_mem_pages[HWREC_MEM_PAGES];
486 #endif /* CONFIG_PVR_DEBUG */
488 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
489 static struct edm_buf_info *hwrec_edm_buf;
493 hwrec_registers_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
497 if (!hwrec_registers) {
498 hwrec_registers = (u32 *) __get_free_page(GFP_KERNEL);
499 if (!hwrec_registers) {
500 pr_err("%s: failed to get free page.\n", __func__);
505 for (i = 0; i < 1024; i++)
506 hwrec_registers[i] = readl(psDevInfo->pvRegsBaseKM + 4 * i);
510 hwrec_pages_free(size_t *size, unsigned long *pages)
517 for (i = 0; (i * PAGE_SIZE) < *size; i++) {
526 hwrec_pages_write(u8 *buffer, size_t size, size_t *current_size,
527 unsigned long *pages, int array_size)
533 size_t offset = *current_size & ~PAGE_MASK;
534 int page = *current_size / PAGE_SIZE;
537 if (((*current_size) / PAGE_SIZE) >= array_size) {
538 pr_err("%s: Size overrun!\n", __func__);
542 pages[page] = __get_free_page(GFP_KERNEL);
544 pr_err("%s: failed to get free page.\n",
550 if (count > (PAGE_SIZE - offset))
551 count = PAGE_SIZE - offset;
553 memcpy(((u8 *) pages[page]) + offset, buffer, count);
558 *current_size += count;
564 #ifdef CONFIG_PVR_DEBUG
568 hwrec_pages_free(&hwrec_mem_size, hwrec_mem_pages);
572 hwrec_mem_write(u8 *buffer, size_t size)
574 return hwrec_pages_write(buffer, size, &hwrec_mem_size,
575 hwrec_mem_pages, ARRAY_SIZE(hwrec_mem_pages));
579 hwrec_mem_print(char *format, ...)
585 va_start(ap, format);
586 size = vscnprintf(tmp, sizeof(tmp), format, ap);
589 return hwrec_mem_write(tmp, size);
591 #endif /* CONFIG_PVR_DEBUG */
594 * Render status buffer dumping.
596 static size_t hwrec_status_size;
597 static unsigned long hwrec_status_pages[1024];
600 hwrec_status_write(char *buffer, size_t size)
602 return hwrec_pages_write(buffer, size, &hwrec_status_size,
604 ARRAY_SIZE(hwrec_status_pages));
608 hwrec_status_free(void)
610 hwrec_pages_free(&hwrec_status_size, hwrec_status_pages);
614 hwrec_status_print(char *format, ...)
620 va_start(ap, format);
621 size = vscnprintf(tmp, sizeof(tmp), format, ap);
624 return hwrec_status_write(tmp, size);
627 #define BUF_DESC_CORRUPT (1 << 31)
629 static void add_uniq_items(struct render_state_buf_list *dst,
630 const struct render_state_buf_list *src)
634 for (i = 0; i < src->cnt; i++) {
635 const struct render_state_buf_info *sbinf = &src->info[i];
638 for (j = 0; j < dst->cnt; j++) {
639 if (sbinf->buf_id == dst->info[j].buf_id) {
640 if (memcmp(sbinf, &dst->info[j],
642 dst->info[j].type |= BUF_DESC_CORRUPT;
647 /* Bound for cnt is guaranteed by the caller */
648 dst->info[dst->cnt] = *sbinf;
654 static struct render_state_buf_list *create_merged_uniq_list(
655 struct render_state_buf_list **bl_set, int set_size)
658 struct render_state_buf_list *dbl;
662 * Create a buf list big enough to contain all elements from each
665 size = offsetof(struct render_state_buf_list, info[0]);
666 for (i = 0; i < set_size; i++) {
669 size += bl_set[i]->cnt * sizeof(bl_set[i]->info[0]);
673 dbl = kmalloc(size, GFP_KERNEL);
678 for (i = 0; i < set_size; i++) {
680 add_uniq_items(dbl, bl_set[i]);
686 static void *vmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
687 u32 handle, off_t offset, size_t size)
689 struct PVRSRV_KERNEL_MEM_INFO *minfo;
690 struct LinuxMemArea *mem_area;
691 enum PVRSRV_ERROR err;
699 if (offset & PAGE_MASK)
702 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
703 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
704 if (err != PVRSRV_OK)
706 if (minfo->pvLinAddrKM)
707 return minfo->pvLinAddrKM;
709 err = PVRSRVLookupOSMemHandle(proc->psHandleBase, (void *)&mem_area,
711 if (err != PVRSRV_OK)
714 start_ofs = offset & PAGE_MASK;
715 end_ofs = PAGE_ALIGN(offset + size);
716 pg_cnt = (end_ofs - start_ofs) >> PAGE_SHIFT;
717 pages = kmalloc(pg_cnt * sizeof(pages[0]), GFP_KERNEL);
720 for (i = 0; i < pg_cnt; i++) {
723 pfn = LinuxMemAreaToCpuPFN(mem_area, start_ofs);
726 pages[i] = pfn_to_page(pfn);
727 start_ofs += PAGE_SIZE;
729 map = vmap(pages, pg_cnt, VM_MAP, PAGE_KERNEL);
737 static void vunmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
738 u32 handle, void *map)
740 struct PVRSRV_KERNEL_MEM_INFO *minfo;
741 enum PVRSRV_ERROR err;
743 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
744 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
745 if (err != PVRSRV_OK)
747 if (minfo->pvLinAddrKM)
749 vunmap((void *)(((unsigned long)map) & PAGE_MASK));
752 static void dump_buf(void *start, size_t size, u32 type)
756 if (type & BUF_DESC_CORRUPT) {
757 type &= ~BUF_DESC_CORRUPT;
760 hwrec_status_print("<type %d%s size %d>\n", type, corr, size);
761 hwrec_status_write(start, size);
764 static struct render_state_buf_list *get_state_buf_list(
765 struct PVRSRV_PER_PROCESS_DATA *proc,
766 u32 handle, off_t offset)
768 struct PVRSRV_KERNEL_MEM_INFO *container;
769 struct render_state_buf_list *buf;
770 enum PVRSRV_ERROR err;
772 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&container,
773 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
774 if (err != PVRSRV_OK)
776 if (!container->pvLinAddrKM)
778 if (offset + sizeof(*buf) > container->ui32AllocSize)
781 buf = container->pvLinAddrKM + offset;
783 if (buf->cnt > ARRAY_SIZE(buf->info))
789 static void dump_state_buf_list(struct PVRSRV_PER_PROCESS_DATA *proc,
790 struct render_state_buf_list *bl)
797 pr_info("Dumping %d render state buffers\n", bl->cnt);
798 for (i = 0; i < bl->cnt; i++) {
799 struct render_state_buf_info *binfo;
802 binfo = &bl->info[i];
804 map = vmap_buf(proc, binfo->buf_id, binfo->offset, binfo->size);
807 dump_buf(map, binfo->size, binfo->type);
809 vunmap_buf(proc, binfo->buf_id, map);
813 static void dump_sgx_state_bufs(struct PVRSRV_PER_PROCESS_DATA *proc,
814 struct PVRSRV_SGXDEV_INFO *dev_info)
816 struct SGXMKIF_HOST_CTL __iomem *hctl = dev_info->psSGXHostCtl;
817 struct render_state_buf_list *bl_set[2] = { NULL };
818 struct render_state_buf_list *mbl;
825 handle_ta = readl(&hctl->render_state_buf_ta_handle);
826 handle_3d = readl(&hctl->render_state_buf_3d_handle);
827 bl_set[0] = get_state_buf_list(proc, handle_ta,
828 dev_info->state_buf_ofs);
830 * The two buf list can be the same if the TA and 3D phases used the
831 * same context at the time of the HWrec. In this case just ignore
834 if (handle_ta != handle_3d)
835 bl_set[1] = get_state_buf_list(proc, handle_3d,
836 dev_info->state_buf_ofs);
837 mbl = create_merged_uniq_list(bl_set, ARRAY_SIZE(bl_set));
841 dump_state_buf_list(proc, mbl);
846 pvr_hwrec_dump(struct PVRSRV_PER_PROCESS_DATA *proc_data,
847 struct PVRSRV_SGXDEV_INFO *psDevInfo)
849 mutex_lock(hwrec_mutex);
851 if (hwrec_open_count || hwrec_event_file_lock) {
852 pr_err("%s: previous hwrec dump is still locked!\n", __func__);
853 mutex_unlock(hwrec_mutex);
857 do_gettimeofday(&hwrec_time);
858 pr_info("HW Recovery dump generated at %010ld%06ld\n",
859 hwrec_time.tv_sec, hwrec_time.tv_usec);
861 hwrec_registers_dump(psDevInfo);
863 #ifdef CONFIG_PVR_DEBUG
865 mmu_hwrec_mem_dump(psDevInfo);
866 #endif /* CONFIG_PVR_DEBUG */
868 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
870 pvr_edm_buffer_destroy(hwrec_edm_buf);
871 hwrec_edm_buf = pvr_edm_buffer_create(psDevInfo);
875 dump_sgx_state_bufs(proc_data, psDevInfo);
879 mutex_unlock(hwrec_mutex);
881 wake_up_interruptible(&hwrec_wait_queue);
888 hwrec_file_open(struct inode *inode, struct file *filp)
890 mutex_lock(hwrec_mutex);
894 mutex_unlock(hwrec_mutex);
899 hwrec_file_release(struct inode *inode, struct file *filp)
901 mutex_lock(hwrec_mutex);
905 mutex_unlock(hwrec_mutex);
910 * Provides a hwrec timestamp for unique dumping.
913 hwrec_time_read(struct file *filp, char __user *buf, size_t size,
918 mutex_lock(hwrec_mutex);
919 snprintf(tmp, sizeof(tmp), "%010ld%06ld",
920 hwrec_time.tv_sec, hwrec_time.tv_usec);
921 mutex_unlock(hwrec_mutex);
923 return simple_read_from_buffer(buf, size, f_pos, tmp, strlen(tmp));
926 static const struct file_operations hwrec_time_fops = {
927 .owner = THIS_MODULE,
929 .read = hwrec_time_read,
930 .open = hwrec_file_open,
931 .release = hwrec_file_release,
935 * Blocks the reader until a HWRec happens.
938 hwrec_event_open(struct inode *inode, struct file *filp)
942 mutex_lock(hwrec_mutex);
944 if (hwrec_event_open_count)
947 hwrec_event_open_count++;
951 mutex_unlock(hwrec_mutex);
957 hwrec_event_release(struct inode *inode, struct file *filp)
959 mutex_lock(hwrec_mutex);
961 hwrec_event_open_count--;
963 mutex_unlock(hwrec_mutex);
970 hwrec_event_read(struct file *filp, char __user *buf, size_t size,
975 mutex_lock(hwrec_mutex);
977 hwrec_event_file_lock = 0;
979 mutex_unlock(hwrec_mutex);
981 ret = wait_event_interruptible(hwrec_wait_queue, hwrec_event);
983 mutex_lock(hwrec_mutex);
986 hwrec_event_file_lock = 1;
988 mutex_unlock(hwrec_mutex);
994 static const struct file_operations hwrec_event_fops = {
995 .owner = THIS_MODULE,
997 .read = hwrec_event_read,
998 .open = hwrec_event_open,
999 .release = hwrec_event_release,
1003 * Reads out all readable registers.
1006 hwrec_regs_llseek(struct file *filp, loff_t offset, int whence)
1010 mutex_lock(hwrec_mutex);
1012 if (hwrec_registers)
1013 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1014 1024 * REGS_DUMP_LINE_SIZE);
1018 mutex_unlock(hwrec_mutex);
1024 hwrec_regs_read(struct file *filp, char __user *buf, size_t size,
1027 char tmp[REGS_DUMP_LINE_SIZE + 1];
1030 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
1033 i = ((int) *f_pos) / (sizeof(tmp) - 1);
1037 mutex_lock(hwrec_mutex);
1039 if (!hwrec_registers)
1042 size = snprintf(tmp, sizeof(tmp), REGS_DUMP_FORMAT, i * 4,
1043 hwrec_registers[i]);
1045 mutex_unlock(hwrec_mutex);
1048 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
1058 static const struct file_operations hwrec_regs_fops = {
1059 .owner = THIS_MODULE,
1060 .llseek = hwrec_regs_llseek,
1061 .read = hwrec_regs_read,
1062 .open = hwrec_file_open,
1063 .release = hwrec_file_release,
1066 #ifdef CONFIG_PVR_DEBUG
1068 * Provides a full context dump: page directory, page tables, and all mapped
1072 hwrec_mem_llseek(struct file *filp, loff_t offset, int whence)
1076 mutex_lock(hwrec_mutex);
1079 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1084 mutex_unlock(hwrec_mutex);
1090 hwrec_mem_read(struct file *filp, char __user *buf, size_t size,
1093 mutex_lock(hwrec_mutex);
1095 if ((*f_pos >= 0) && (*f_pos < hwrec_mem_size)) {
1098 size = min(size, (size_t) hwrec_mem_size - (size_t) *f_pos);
1100 page = (*f_pos) / PAGE_SIZE;
1101 offset = (*f_pos) & ~PAGE_MASK;
1103 size = min(size, (size_t) PAGE_SIZE - offset);
1105 if (copy_to_user(buf,
1106 ((u8 *) hwrec_mem_pages[page]) + offset,
1108 mutex_unlock(hwrec_mutex);
1114 mutex_unlock(hwrec_mutex);
1120 static const struct file_operations hwrec_mem_fops = {
1121 .owner = THIS_MODULE,
1122 .llseek = hwrec_mem_llseek,
1123 .read = hwrec_mem_read,
1124 .open = hwrec_file_open,
1125 .release = hwrec_file_release,
1127 #endif /* CONFIG_PVR_DEBUG */
1130 * Read out edm trace created before HW recovery reset.
1132 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1134 hwrec_edm_llseek(struct file *filp, loff_t offset, int whence)
1138 mutex_lock(hwrec_mutex);
1141 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1142 hwrec_edm_buf->len);
1146 mutex_unlock(hwrec_mutex);
1152 hwrec_edm_read(struct file *filp, char __user *buf, size_t size,
1157 mutex_lock(hwrec_mutex);
1160 ret = simple_read_from_buffer(buf, size, f_pos,
1161 hwrec_edm_buf->data,
1162 hwrec_edm_buf->len);
1166 mutex_unlock(hwrec_mutex);
1171 static const struct file_operations hwrec_edm_fops = {
1172 .owner = THIS_MODULE,
1173 .llseek = hwrec_edm_llseek,
1174 .read = hwrec_edm_read,
1175 .open = hwrec_file_open,
1176 .release = hwrec_file_release,
1178 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
1181 * Provides a dump of the TA and 3D status buffers.
1184 hwrec_status_llseek(struct file *filp, loff_t offset, int whence)
1188 mutex_lock(hwrec_mutex);
1190 if (hwrec_status_size)
1191 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1196 mutex_unlock(hwrec_mutex);
1202 hwrec_status_read(struct file *filp, char __user *buf, size_t size,
1205 mutex_lock(hwrec_mutex);
1207 if ((*f_pos >= 0) && (*f_pos < hwrec_status_size)) {
1210 size = min(size, (size_t) hwrec_status_size - (size_t) *f_pos);
1212 page = (*f_pos) / PAGE_SIZE;
1213 offset = (*f_pos) & ~PAGE_MASK;
1215 size = min(size, (size_t) PAGE_SIZE - offset);
1217 if (copy_to_user(buf,
1218 ((u8 *) hwrec_status_pages[page]) + offset,
1220 mutex_unlock(hwrec_mutex);
1226 mutex_unlock(hwrec_mutex);
1232 static const struct file_operations hwrec_status_fops = {
1233 .owner = THIS_MODULE,
1234 .llseek = hwrec_status_llseek,
1235 .read = hwrec_status_read,
1236 .open = hwrec_file_open,
1237 .release = hwrec_file_release,
1243 int pvr_debugfs_init(void)
1245 mutex_init(hwrec_mutex);
1247 pvr_debugfs_dir = debugfs_create_dir("pvr", NULL);
1248 if (!pvr_debugfs_dir)
1251 if (!debugfs_create_file("reset_sgx", S_IWUSR, pvr_debugfs_dir,
1252 &pvr_reset, &pvr_debugfs_reset_fops)) {
1253 debugfs_remove(pvr_debugfs_dir);
1257 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1258 if (!debugfs_create_file("edm_trace", S_IRUGO, pvr_debugfs_dir, NULL,
1259 &pvr_debugfs_edm_fops)) {
1260 debugfs_remove_recursive(pvr_debugfs_dir);
1264 #ifdef CONFIG_PVR_TRACE_CMD
1265 if (!debugfs_create_file("command_trace", S_IRUGO, pvr_debugfs_dir,
1266 NULL, &pvr_dbg_trcmd_fops)) {
1267 debugfs_remove_recursive(pvr_debugfs_dir);
1272 if (!debugfs_create_file("registers", S_IRUSR, pvr_debugfs_dir, NULL,
1273 &pvr_debugfs_regs_fops)) {
1274 debugfs_remove(pvr_debugfs_dir);
1278 if (!debugfs_create_file("hwrec_event", S_IRUSR, pvr_debugfs_dir, NULL,
1279 &hwrec_event_fops)) {
1280 debugfs_remove_recursive(pvr_debugfs_dir);
1284 if (!debugfs_create_file("hwrec_time", S_IRUSR, pvr_debugfs_dir, NULL,
1285 &hwrec_time_fops)) {
1286 debugfs_remove_recursive(pvr_debugfs_dir);
1290 if (!debugfs_create_file("hwrec_regs", S_IRUSR, pvr_debugfs_dir, NULL,
1291 &hwrec_regs_fops)) {
1292 debugfs_remove_recursive(pvr_debugfs_dir);
1296 #ifdef CONFIG_PVR_DEBUG
1297 if (!debugfs_create_file("hwrec_mem", S_IRUSR, pvr_debugfs_dir, NULL,
1299 debugfs_remove_recursive(pvr_debugfs_dir);
1302 #endif /* CONFIG_PVR_DEBUG */
1304 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1305 if (!debugfs_create_file("hwrec_edm", S_IRUSR, pvr_debugfs_dir, NULL,
1307 debugfs_remove_recursive(pvr_debugfs_dir);
1312 if (!debugfs_create_file("hwrec_status", S_IRUSR, pvr_debugfs_dir, NULL,
1313 &hwrec_status_fops)) {
1314 debugfs_remove_recursive(pvr_debugfs_dir);
1321 void pvr_debugfs_cleanup(void)
1323 debugfs_remove_recursive(pvr_debugfs_dir);
1325 if (hwrec_registers)
1326 free_page((u32) hwrec_registers);
1328 #ifdef CONFIG_PVR_DEBUG
1330 #endif /* CONFIG_PVR_DEBUG */
1332 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1334 pvr_edm_buffer_destroy(hwrec_edm_buf);
1337 hwrec_status_free();