2 * Copyright (c) 2010-2011 Imre Deak <imre.deak@nokia.com>
3 * Copyright (c) 2010-2011 Luc Verhaegen <libv@codethink.co.uk>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * Debugfs interface living in pvr/ subdirectory.
25 #include <linux/kernel.h>
26 #include <linux/debugfs.h>
27 #include <linux/vmalloc.h>
28 #include <linux/mutex.h>
29 #include <linux/uaccess.h>
32 #include "img_types.h"
33 #include "servicesext.h"
35 #include "sgxinfokm.h"
36 #include "syscommon.h"
37 #include "pvr_bridge_km.h"
39 #include "pvr_debugfs.h"
41 #include "bridged_support.h"
/* Root "pvr/" directory in debugfs; all files below are created under it
 * in pvr_debugfs_init() and torn down in pvr_debugfs_cleanup(). */
44 struct dentry *pvr_debugfs_dir;
/*
 * Walk the global device list (SysAcquireData) and return the first node
 * whose device type is SGX.
 * NOTE(review): this extraction is missing lines here (opening brace, the
 * early-return on SysAcquireData failure, the loop's break/return, and the
 * final return) -- verify against the upstream driver before editing.
 */
50 static struct PVRSRV_DEVICE_NODE *get_sgx_node(void)
52 struct SYS_DATA *sysdata;
53 struct PVRSRV_DEVICE_NODE *node;
55 if (SysAcquireData(&sysdata) != PVRSRV_OK)
58 for (node = sysdata->psDeviceNodeList; node; node = node->psNext)
59 if (node->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
/*
 * Debugfs write handler for "reset_sgx": force a HW recovery reset.
 * Powers the SGX device to D0, runs HWRecoveryResetSGX(), then lets the
 * active-power-management state machine re-evaluate via
 * SGXTestActivePowerEvent().
 * NOTE(review): extraction is missing lines (braces, the pvr_is_disabled()
 * branch body, bridge lock/unlock and returns) -- verify upstream.
 */
65 static int pvr_debugfs_reset(void *data, u64 val)
67 struct PVRSRV_DEVICE_NODE *node;
68 enum PVRSRV_ERROR err;
76 if (pvr_is_disabled()) {
81 node = get_sgx_node();
87 err = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
88 PVRSRV_POWER_STATE_D0);
89 if (err != PVRSRV_OK) {
94 HWRecoveryResetSGX(node, __func__);
96 SGXTestActivePowerEvent(node);
/*
 * Generic debugfs "set" dispatcher: routes the write to the handler for
 * the attribute variable that was written.
 * NOTE(review): 'var' and 'pvr_reset' are declared on lines not visible in
 * this extraction.
 */
103 static int pvr_debugfs_set(void *data, u64 val)
107 if (var == &pvr_reset)
108 return pvr_debugfs_reset(data, val);
/* Write-only attribute (no read callback); parsed with "%llu". */
113 DEFINE_SIMPLE_ATTRIBUTE(pvr_debugfs_fops, NULL, pvr_debugfs_set, "%llu\n");
115 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Number of 4-word records in the microkernel (EDM) trace buffer. */
119 #define SGXMK_TRACE_BUFFER_SIZE 512
/* Worst-case formatted length of one trace record line. */
120 #define SGXMK_TRACE_BUF_STR_LEN 80
/* Snapshot of the EDM trace, formatted as text.
 * NOTE(review): the struct's fields (at least 'len' and a trailing 'data'
 * array -- see pvr_edm_buffer_create()) are on lines missing from this
 * extraction. */
122 struct edm_buf_info {
/*
 * Format the last microkernel status code plus all 512 trace records.
 * With a non-NULL 'dst' the text is accumulated via scnprintf(); otherwise
 * each line goes to printk(KERN_DEBUG ...). Returns the number of bytes
 * produced, clamped to dst_len.
 * NOTE(review): declarations of 'buf', 'buf_end', 'p', 'i' and several
 * control-flow lines are missing from this extraction.
 */
128 edm_trace_print(struct PVRSRV_SGXDEV_INFO *sdev, char *dst, size_t dst_len)
137 if (!sdev->psKernelEDMStatusBufferMemInfo)
140 buf = sdev->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
143 p += scnprintf(dst + p, dst_len - p,
144 "Last SGX microkernel status code: 0x%x\n", *buf);
146 printk(KERN_DEBUG "Last SGX microkernel status code: 0x%x\n",
/* 4 words per trace record. */
153 buf_end = buf + SGXMK_TRACE_BUFFER_SIZE * 4;
157 /* Dump the status values */
158 for (i = 0; i < SGXMK_TRACE_BUFFER_SIZE; i++) {
160 p += scnprintf(dst + p, dst_len - p,
161 "%3d %08X %08X %08X %08X\n",
162 i, buf[2], buf[3], buf[1], buf[0]);
164 printk(KERN_DEBUG "%3d %08X %08X %08X %08X\n",
165 i, buf[2], buf[3], buf[1], buf[0]);
/* Never report more than the caller's buffer can hold. */
171 return p > dst_len ? dst_len : p;
/*
 * Allocate an edm_buf_info large enough for a full formatted trace
 * (512 records * 80 chars) and snapshot the EDM trace into it.
 * Returns NULL on allocation failure (error path lines are missing from
 * this extraction). Caller frees with pvr_edm_buffer_destroy().
 */
174 static struct edm_buf_info *
175 pvr_edm_buffer_create(struct PVRSRV_SGXDEV_INFO *sgx_info)
177 struct edm_buf_info *bi;
180 /* Take a snapshot of the EDM trace buffer */
181 size = SGXMK_TRACE_BUFFER_SIZE * SGXMK_TRACE_BUF_STR_LEN;
182 bi = vmalloc(sizeof(*bi) + size);
184 pr_err("%s: vmalloc failed!\n", __func__);
188 bi->len = edm_trace_print(sgx_info, bi->data, size);
/* Free a snapshot made by pvr_edm_buffer_create() (vfree -- body line
 * missing from this extraction). */
194 pvr_edm_buffer_destroy(struct edm_buf_info *edm)
/* debugfs open: take a fresh EDM snapshot and stash it in private_data. */
199 static int pvr_debugfs_edm_open(struct inode *inode, struct file *file)
201 struct PVRSRV_DEVICE_NODE *node;
203 node = get_sgx_node();
205 file->private_data = pvr_edm_buffer_create(node->pvDevice);
206 if (!file->private_data)
/* debugfs release: drop the snapshot taken at open time. */
212 static int pvr_debugfs_edm_release(struct inode *inode, struct file *file)
214 pvr_edm_buffer_destroy(file->private_data);
/* debugfs read: serve the snapshot text taken at open time. */
219 static ssize_t pvr_debugfs_edm_read(struct file *file, char __user *buffer,
220 size_t count, loff_t *ppos)
222 struct edm_buf_info *bi = file->private_data;
224 return simple_read_from_buffer(buffer, count, ppos, bi->data, bi->len);
227 static const struct file_operations pvr_debugfs_edm_fops = {
228 .owner = THIS_MODULE,
229 .open = pvr_debugfs_edm_open,
230 .read = pvr_debugfs_edm_read,
231 .release = pvr_debugfs_edm_release,
233 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
237 * HW Recovery dumping support.
/* Serializes every hwrec_* dump/read path below. */
240 static struct mutex hwrec_mutex[1];
/* Timestamp of the most recent HW recovery dump (see pvr_hwrec_dump()). */
241 static struct timeval hwrec_time;
/* Non-zero while any hwrec dump file is open; blocks new dumps. */
242 static int hwrec_open_count;
/* Readers of hwrec_event sleep here until a recovery happens. */
243 static DECLARE_WAIT_QUEUE_HEAD(hwrec_wait_queue);
244 static int hwrec_event;
246 /* add extra locking to keep us from overwriting things during dumping. */
247 static int hwrec_event_open_count;
248 static int hwrec_event_file_lock;
250 /* While these could get moved into PVRSRV_SGXDEV_INFO, the more future-proof
251 * way of handling hw recovery events is by providing 1 single hwrecovery dump
252 * at a time, and adding a hwrec_info debugfs file with: process information,
253 * general driver information, and the instance of the (then multicore) pvr
254 * where the hwrec event happened.
/* One page holding a snapshot of 1024 SGX registers (hwrec_registers_dump). */
256 static u32 *hwrec_registers;
258 #ifdef CONFIG_PVR_DEBUG
259 static size_t hwrec_mem_size;
/* NOTE(review): HWREC_MEM_PAGES expands to (4 * PAGE_SIZE) but is used as
 * an ELEMENT COUNT for hwrec_mem_pages[], i.e. 16384 entries on 4K pages
 * (64/128 KiB static array). If the intent was "4 pages worth", this looks
 * oversized -- confirm against the upstream driver. */
260 #define HWREC_MEM_PAGES (4 * PAGE_SIZE)
261 static unsigned long hwrec_mem_pages[HWREC_MEM_PAGES];
262 #endif /* CONFIG_PVR_DEBUG */
264 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* EDM trace snapshot captured at HW-recovery time (not at file-open time). */
265 static struct edm_buf_info *hwrec_edm_buf;
/*
 * Snapshot the first 1024 32-bit SGX registers (one full page: 1024 * 4
 * bytes) into hwrec_registers, allocating the page on first use.
 * Caller must hold hwrec_mutex (see pvr_hwrec_dump()).
 */
269 hwrec_registers_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
273 if (!hwrec_registers) {
274 hwrec_registers = (u32 *) __get_free_page(GFP_KERNEL);
275 if (!hwrec_registers) {
276 pr_err("%s: failed to get free page.\n", __func__);
281 for (i = 0; i < 1024; i++)
282 hwrec_registers[i] = readl(psDevInfo->pvRegsBaseKM + 4 * i);
/* Free every page backing *size bytes of dump data and reset *size.
 * NOTE(review): loop body (free_page + pages[i] = 0?) and the *size reset
 * are on lines missing from this extraction. */
286 hwrec_pages_free(size_t *size, u32 *pages)
/*
 * Append 'size' bytes to a page-array-backed dump stream, allocating a new
 * page whenever the current one fills. *current_size tracks the running
 * total; writes beyond array_size pages are rejected ("Size overrun!").
 * NOTE(review): the return statements and outer loop structure are on
 * lines missing from this extraction.
 */
302 hwrec_pages_write(u8 *buffer, size_t size, size_t *current_size, u32 *pages,
/* Position within the current (possibly partial) page. */
309 size_t offset = *current_size & ~PAGE_MASK;
310 int page = *current_size / PAGE_SIZE;
313 if (((*current_size) / PAGE_SIZE) >= array_size) {
314 pr_err("%s: Size overrun!\n", __func__);
318 pages[page] = __get_free_page(GFP_KERNEL);
320 pr_err("%s: failed to get free page.\n",
/* Clamp the copy to the remainder of the current page. */
326 if (count > (PAGE_SIZE - offset))
327 count = PAGE_SIZE - offset;
329 memcpy(((u8 *) pages[page]) + offset, buffer, count);
334 *current_size += count;
340 #ifdef CONFIG_PVR_DEBUG
/* NOTE(review): this line belongs to a hwrec_mem_free()-style helper whose
 * signature line is missing from this extraction. */
344 hwrec_pages_free(&hwrec_mem_size, hwrec_mem_pages);
/* Append raw bytes to the hwrec memory dump stream. */
348 hwrec_mem_write(u8 *buffer, size_t size)
350 return hwrec_pages_write(buffer, size, &hwrec_mem_size,
351 hwrec_mem_pages, ARRAY_SIZE(hwrec_mem_pages));
/* printf-style append to the memory dump stream (formats into a local
 * 'tmp' buffer declared on a missing line; va_end not visible here --
 * verify it exists upstream). */
355 hwrec_mem_print(char *format, ...)
361 va_start(ap, format);
362 size = vscnprintf(tmp, sizeof(tmp), format, ap);
365 return hwrec_mem_write(tmp, size);
367 #endif /* CONFIG_PVR_DEBUG */
370 * Render status buffer dumping.
372 static size_t hwrec_status_size;
/* Up to 1024 pages of render-state buffer dump data. */
373 static u32 hwrec_status_pages[1024];
/* Append raw bytes to the render-status dump stream. */
376 hwrec_status_write(char *buffer, size_t size)
378 return hwrec_pages_write(buffer, size, &hwrec_status_size,
380 ARRAY_SIZE(hwrec_status_pages));
/* Release all pages of the render-status dump (called from cleanup). */
384 hwrec_status_free(void)
386 hwrec_pages_free(&hwrec_status_size, hwrec_status_pages);
/* printf-style append to the render-status dump stream ('tmp' and the
 * va_end are on lines missing from this extraction -- verify upstream). */
390 hwrec_status_print(char *format, ...)
396 va_start(ap, format);
397 size = vscnprintf(tmp, sizeof(tmp), format, ap);
400 return hwrec_status_write(tmp, size);
/*
 * Flag OR-ed into a render-state buffer descriptor's 'type' (u32) when two
 * descriptors share a buf_id but their contents differ (see
 * add_uniq_items()); dump_buf() strips it before printing.
 * Use an unsigned constant: left-shifting (signed) 1 into the sign bit,
 * (1 << 31), is undefined behavior in C (ISO C11 6.5.7p4).
 */
#define BUF_DESC_CORRUPT (1U << 31)
/*
 * Merge the descriptors of 'src' into 'dst', keyed by buf_id:
 *  - a buf_id already present with identical contents is skipped;
 *  - a buf_id present but with differing contents gets BUF_DESC_CORRUPT
 *    OR-ed into the existing entry's type;
 *  - an unseen buf_id is appended (dst->cnt capacity is the caller's
 *    responsibility, per the original comment below).
 * NOTE(review): loop variable declarations, the memcmp length argument and
 * the cnt increment are on lines missing from this extraction.
 */
405 static void add_uniq_items(struct render_state_buf_list *dst,
406 const struct render_state_buf_list *src)
410 for (i = 0; i < src->cnt; i++) {
411 const struct render_state_buf_info *sbinf = &src->info[i];
414 for (j = 0; j < dst->cnt; j++) {
415 if (sbinf->buf_id == dst->info[j].buf_id) {
416 if (memcmp(sbinf, &dst->info[j],
418 dst->info[j].type |= BUF_DESC_CORRUPT;
423 /* Bound for cnt is guaranteed by the caller */
424 dst->info[dst->cnt] = *sbinf;
/*
 * Allocate one list sized (via offsetof + per-source cnt) to hold every
 * element of every list in bl_set, then merge them all de-duplicated via
 * add_uniq_items(). Returns the kmalloc'ed merged list (caller kfrees);
 * NULL-allocation and NULL-source handling are on missing lines.
 */
430 static struct render_state_buf_list *create_merged_uniq_list(
431 struct render_state_buf_list **bl_set, int set_size)
434 struct render_state_buf_list *dbl;
438 * Create a buf list big enough to contain all elements from each
441 size = offsetof(struct render_state_buf_list, info[0]);
442 for (i = 0; i < set_size; i++) {
445 size += bl_set[i]->cnt * sizeof(bl_set[i]->info[0]);
449 dbl = kmalloc(size, GFP_KERNEL);
454 for (i = 0; i < set_size; i++) {
456 add_uniq_items(dbl, bl_set[i]);
/*
 * Map a userspace-handle-identified buffer into kernel virtual space so it
 * can be dumped. If the mem-info already has a kernel linear address it is
 * returned directly; otherwise the backing PFNs are collected from the
 * LinuxMemArea and vmap()'ed.
 * NOTE(review): many declarations (pages, pfn, map, start/end_ofs, pg_cnt)
 * and all the error-return branches are on lines missing from this
 * extraction; the offset check on the first line below rejects offsets
 * with bits in PAGE_MASK, but the rejected-branch body is not visible.
 */
462 static void *vmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
463 u32 handle, off_t offset, size_t size)
465 struct PVRSRV_KERNEL_MEM_INFO *minfo;
466 struct LinuxMemArea *mem_area;
467 enum PVRSRV_ERROR err;
475 if (offset & PAGE_MASK)
478 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
479 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
480 if (err != PVRSRV_OK)
/* Already kernel-mapped: no vmap needed. */
482 if (minfo->pvLinAddrKM)
483 return minfo->pvLinAddrKM;
485 err = PVRSRVLookupOSMemHandle(proc->psHandleBase, (void *)&mem_area,
487 if (err != PVRSRV_OK)
490 start_ofs = offset & PAGE_MASK;
491 end_ofs = PAGE_ALIGN(offset + size);
492 pg_cnt = (end_ofs - start_ofs) >> PAGE_SHIFT;
493 pages = kmalloc(pg_cnt * sizeof(pages[0]), GFP_KERNEL);
496 for (i = 0; i < pg_cnt; i++) {
499 pfn = LinuxMemAreaToCpuPFN(mem_area, start_ofs);
502 pages[i] = pfn_to_page(pfn);
503 start_ofs += PAGE_SIZE;
505 map = vmap(pages, pg_cnt, VM_MAP, PAGE_KERNEL);
/*
 * Undo vmap_buf(): if the buffer had a kernel linear address nothing was
 * mapped (the early-return line is missing from this extraction);
 * otherwise vunmap the page-aligned base of the mapping.
 */
513 static void vunmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
514 u32 handle, void *map)
516 struct PVRSRV_KERNEL_MEM_INFO *minfo;
517 enum PVRSRV_ERROR err;
519 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
520 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
521 if (err != PVRSRV_OK)
523 if (minfo->pvLinAddrKM)
525 vunmap((void *)(((unsigned long)map) & PAGE_MASK));
/*
 * Write one buffer into the render-status dump stream: a "<type N size M>"
 * header (with " corrupt" marker when BUF_DESC_CORRUPT is set -- 'corr' is
 * assigned on lines missing from this extraction) followed by the raw
 * contents.
 */
528 static void dump_buf(void *start, size_t size, u32 type)
532 if (type & BUF_DESC_CORRUPT) {
533 type &= ~BUF_DESC_CORRUPT;
536 hwrec_status_print("<type %d%s size %d>\n", type, corr, size);
537 hwrec_status_write(start, size);
/*
 * Resolve a user handle + offset into a validated render_state_buf_list
 * pointer inside the container allocation. Validation: handle lookup must
 * succeed, the container must have a kernel linear address, the list
 * header must fit inside the allocation, and cnt must not exceed the info
 * array. The failure-return lines are missing from this extraction.
 */
540 static struct render_state_buf_list *get_state_buf_list(
541 struct PVRSRV_PER_PROCESS_DATA *proc,
542 u32 handle, off_t offset)
544 struct PVRSRV_KERNEL_MEM_INFO *container;
545 struct render_state_buf_list *buf;
546 enum PVRSRV_ERROR err;
548 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&container,
549 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
550 if (err != PVRSRV_OK)
552 if (!container->pvLinAddrKM)
554 if (offset + sizeof(*buf) > container->ui32AllocSize)
557 buf = container->pvLinAddrKM + offset;
559 if (buf->cnt > ARRAY_SIZE(buf->info))
/*
 * Map, dump and unmap each buffer described by 'bl'. ('map' declaration
 * and the vmap-failure branch are on lines missing from this extraction.)
 */
565 static void dump_state_buf_list(struct PVRSRV_PER_PROCESS_DATA *proc,
566 struct render_state_buf_list *bl)
573 pr_info("Dumping %d render state buffers\n", bl->cnt);
574 for (i = 0; i < bl->cnt; i++) {
575 struct render_state_buf_info *binfo;
578 binfo = &bl->info[i];
580 map = vmap_buf(proc, binfo->buf_id, binfo->offset, binfo->size);
583 dump_buf(map, binfo->size, binfo->type);
585 vunmap_buf(proc, binfo->buf_id, map);
/*
 * Dump the TA and 3D render-state buffer lists advertised by the SGX host
 * control block. When both phases used the same context (equal handles)
 * only one list is collected; the two lists are then merged de-duplicated
 * and dumped. (handle_ta/handle_3d declarations and the kfree of 'mbl'
 * are on lines missing from this extraction.)
 */
589 static void dump_sgx_state_bufs(struct PVRSRV_PER_PROCESS_DATA *proc,
590 struct PVRSRV_SGXDEV_INFO *dev_info)
592 struct SGXMKIF_HOST_CTL __iomem *hctl = dev_info->psSGXHostCtl;
593 struct render_state_buf_list *bl_set[2] = { NULL };
594 struct render_state_buf_list *mbl;
601 handle_ta = readl(&hctl->render_state_buf_ta_handle);
602 handle_3d = readl(&hctl->render_state_buf_3d_handle);
603 bl_set[0] = get_state_buf_list(proc, handle_ta,
604 dev_info->state_buf_ofs);
606 * The two buf list can be the same if the TA and 3D phases used the
607 * same context at the time of the HWrec. In this case just ignore
610 if (handle_ta != handle_3d)
611 bl_set[1] = get_state_buf_list(proc, handle_3d,
612 dev_info->state_buf_ofs);
613 mbl = create_merged_uniq_list(bl_set, ARRAY_SIZE(bl_set));
617 dump_state_buf_list(proc, mbl);
/*
 * Entry point called when a HW recovery happens: capture a complete dump
 * (registers, optional MMU/memory dump, optional EDM trace snapshot, and
 * the TA/3D render-state buffers), timestamp it, and wake any reader
 * blocked on the hwrec_event file. Refuses to overwrite a dump that a
 * userspace reader still holds open (hwrec_open_count /
 * hwrec_event_file_lock). Serialized by hwrec_mutex.
 * NOTE(review): the return statements and some intermediate lines are
 * missing from this extraction.
 */
622 pvr_hwrec_dump(struct PVRSRV_PER_PROCESS_DATA *proc_data,
623 struct PVRSRV_SGXDEV_INFO *psDevInfo)
625 mutex_lock(hwrec_mutex);
627 if (hwrec_open_count || hwrec_event_file_lock) {
628 pr_err("%s: previous hwrec dump is still locked!\n", __func__);
629 mutex_unlock(hwrec_mutex);
633 do_gettimeofday(&hwrec_time);
634 pr_info("HW Recovery dump generated at %010ld%06ld\n",
635 hwrec_time.tv_sec, hwrec_time.tv_usec);
637 hwrec_registers_dump(psDevInfo);
639 #ifdef CONFIG_PVR_DEBUG
641 mmu_hwrec_mem_dump(psDevInfo);
642 #endif /* CONFIG_PVR_DEBUG */
644 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Replace any previous snapshot with one taken at recovery time. */
646 pvr_edm_buffer_destroy(hwrec_edm_buf);
647 hwrec_edm_buf = pvr_edm_buffer_create(psDevInfo);
651 dump_sgx_state_bufs(proc_data, psDevInfo);
655 mutex_unlock(hwrec_mutex);
657 wake_up_interruptible(&hwrec_wait_queue);
/* Shared open for the hwrec dump files; under hwrec_mutex it presumably
 * increments hwrec_open_count (the body line is missing from this
 * extraction -- verify upstream). */
664 hwrec_file_open(struct inode *inode, struct file *filp)
666 mutex_lock(hwrec_mutex);
670 mutex_unlock(hwrec_mutex);
/* Shared release; counterpart of hwrec_file_open() (body line missing). */
675 hwrec_file_release(struct inode *inode, struct file *filp)
677 mutex_lock(hwrec_mutex);
681 mutex_unlock(hwrec_mutex);
/*
 * Common llseek validation for the hwrec files: clamp SEEK_SET/SEEK_CUR/
 * SEEK_END offsets into [0, max] and compute the new position.
 * NOTE(review): the switch statement, error returns and final assignment/
 * return are on lines missing from this extraction.
 */
686 hwrec_llseek_helper(struct file *filp, loff_t offset, int whence, loff_t max)
692 if ((offset > max) || (offset < 0))
698 if (((filp->f_pos + offset) > max) ||
699 ((filp->f_pos + offset) < 0))
702 f_pos = filp->f_pos + offset;
709 f_pos = max + offset;
723 * Provides a hwrec timestamp for unique dumping.
/* Serve the dump timestamp ("%010ld%06ld" sec+usec) as text; 'tmp' is
 * declared on a line missing from this extraction. */
726 hwrec_time_read(struct file *filp, char __user *buf, size_t size,
731 mutex_lock(hwrec_mutex);
732 snprintf(tmp, sizeof(tmp), "%010ld%06ld",
733 hwrec_time.tv_sec, hwrec_time.tv_usec);
734 mutex_unlock(hwrec_mutex);
736 return simple_read_from_buffer(buf, size, f_pos, tmp, strlen(tmp));
739 static const struct file_operations hwrec_time_fops = {
740 .owner = THIS_MODULE,
742 .read = hwrec_time_read,
743 .open = hwrec_file_open,
744 .release = hwrec_file_release,
748 * Blocks the reader until a HWRec happens.
/* Allow only a single concurrent opener of the event file (the -EUSERS/
 * error branch body is on a line missing from this extraction). */
751 hwrec_event_open(struct inode *inode, struct file *filp)
755 mutex_lock(hwrec_mutex);
757 if (hwrec_event_open_count)
760 hwrec_event_open_count++;
764 mutex_unlock(hwrec_mutex);
770 hwrec_event_release(struct inode *inode, struct file *filp)
772 mutex_lock(hwrec_mutex);
774 hwrec_event_open_count--;
776 mutex_unlock(hwrec_mutex);
/*
 * Blocking read: release the dump file-lock, sleep until pvr_hwrec_dump()
 * raises hwrec_event, then re-take the file-lock so the dump cannot be
 * overwritten while userspace reads it out. Interruptible; the ret
 * handling lines are missing from this extraction.
 */
783 hwrec_event_read(struct file *filp, char __user *buf, size_t size,
788 mutex_lock(hwrec_mutex);
790 hwrec_event_file_lock = 0;
792 mutex_unlock(hwrec_mutex);
794 ret = wait_event_interruptible(hwrec_wait_queue, hwrec_event);
796 mutex_lock(hwrec_mutex);
799 hwrec_event_file_lock = 1;
801 mutex_unlock(hwrec_mutex);
807 static const struct file_operations hwrec_event_fops = {
808 .owner = THIS_MODULE,
810 .read = hwrec_event_read,
811 .open = hwrec_event_open,
812 .release = hwrec_event_release,
816 * Reads out all readable registers.
/* One formatted line per register: "0xOOO 0xVVVVVVVV\n" = 17 bytes. */
818 #define HWREC_REGS_LINE_SIZE 17
/* Seek within the 1024-line register dump, under hwrec_mutex. */
821 hwrec_regs_llseek(struct file *filp, loff_t offset, int whence)
825 mutex_lock(hwrec_mutex);
828 f_pos = hwrec_llseek_helper(filp, offset, whence,
829 1024 * HWREC_REGS_LINE_SIZE);
833 mutex_unlock(hwrec_mutex);
/*
 * Serve the register snapshot one formatted line at a time. Requires the
 * caller's buffer to hold at least one full line; returns nothing useful
 * until hwrec_registers has been populated by a dump.
 * NOTE(review): the error returns, the second snprintf argument
 * (presumably hwrec_registers[i]) and the f_pos update are on lines
 * missing from this extraction.
 */
839 hwrec_regs_read(struct file *filp, char __user *buf, size_t size,
842 char tmp[HWREC_REGS_LINE_SIZE + 1];
845 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
/* Which register line does the current file position fall in? */
848 i = ((int) *f_pos) / (sizeof(tmp) - 1);
852 mutex_lock(hwrec_mutex);
854 if (!hwrec_registers)
857 size = snprintf(tmp, sizeof(tmp), "0x%03X 0x%08X\n", i * 4,
860 mutex_unlock(hwrec_mutex);
/* Copy from mid-line when *f_pos is not line-aligned. */
863 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
873 static const struct file_operations hwrec_regs_fops = {
874 .owner = THIS_MODULE,
875 .llseek = hwrec_regs_llseek,
876 .read = hwrec_regs_read,
877 .open = hwrec_file_open,
878 .release = hwrec_file_release,
881 #ifdef CONFIG_PVR_DEBUG
883 * Provides a full context dump: page directory, page tables, and all mapped
/* Seek within the memory dump; the max argument (presumably
 * hwrec_mem_size) is on a line missing from this extraction. */
887 hwrec_mem_llseek(struct file *filp, loff_t offset, int whence)
891 mutex_lock(hwrec_mutex);
894 f_pos = hwrec_llseek_helper(filp, offset, whence,
899 mutex_unlock(hwrec_mutex);
/*
 * Serve the page-array-backed memory dump: clamp the request to the dump
 * size and to the current page boundary, then copy_to_user from the page
 * that *f_pos falls in. ('page'/'offset' declarations, f_pos update and
 * return values are on lines missing from this extraction.)
 */
905 hwrec_mem_read(struct file *filp, char __user *buf, size_t size,
908 mutex_lock(hwrec_mutex);
910 if ((*f_pos >= 0) && (*f_pos < hwrec_mem_size)) {
913 size = min(size, (size_t) hwrec_mem_size - (size_t) *f_pos);
915 page = (*f_pos) / PAGE_SIZE;
916 offset = (*f_pos) & ~PAGE_MASK;
918 size = min(size, (size_t) PAGE_SIZE - offset);
920 if (copy_to_user(buf,
921 ((u8 *) hwrec_mem_pages[page]) + offset,
923 mutex_unlock(hwrec_mutex);
929 mutex_unlock(hwrec_mutex);
935 static const struct file_operations hwrec_mem_fops = {
936 .owner = THIS_MODULE,
937 .llseek = hwrec_mem_llseek,
938 .read = hwrec_mem_read,
939 .open = hwrec_file_open,
940 .release = hwrec_file_release,
942 #endif /* CONFIG_PVR_DEBUG */
945 * Read out edm trace created before HW recovery reset.
947 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Seek within the recovery-time EDM snapshot; the max argument
 * (presumably hwrec_edm_buf->len) is on a missing line. */
949 hwrec_edm_llseek(struct file *filp, loff_t offset, int whence)
953 mutex_lock(hwrec_mutex);
956 f_pos = hwrec_llseek_helper(filp, offset, whence,
961 mutex_unlock(hwrec_mutex);
/* Serve the snapshot text (hwrec_edm_buf data/len arguments are on lines
 * missing from this extraction); the NULL-snapshot guard is also not
 * visible -- verify upstream. */
967 hwrec_edm_read(struct file *filp, char __user *buf, size_t size,
972 mutex_lock(hwrec_mutex);
975 ret = simple_read_from_buffer(buf, size, f_pos,
981 mutex_unlock(hwrec_mutex);
986 static const struct file_operations hwrec_edm_fops = {
987 .owner = THIS_MODULE,
988 .llseek = hwrec_edm_llseek,
989 .read = hwrec_edm_read,
990 .open = hwrec_file_open,
991 .release = hwrec_file_release,
993 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
996 * Provides a dump of the TA and 3D status buffers.
/* Seek within the render-status dump (no-op while the dump is empty). */
999 hwrec_status_llseek(struct file *filp, loff_t offset, int whence)
1003 mutex_lock(hwrec_mutex);
1005 if (hwrec_status_size)
1006 f_pos = hwrec_llseek_helper(filp, offset, whence,
1011 mutex_unlock(hwrec_mutex);
/*
 * Serve the render-status dump; same page-at-a-time copy_to_user scheme
 * as hwrec_mem_read() but over hwrec_status_pages/hwrec_status_size.
 * ('page'/'offset' declarations, f_pos update and returns are on lines
 * missing from this extraction.)
 */
1017 hwrec_status_read(struct file *filp, char __user *buf, size_t size,
1020 mutex_lock(hwrec_mutex);
1022 if ((*f_pos >= 0) && (*f_pos < hwrec_status_size)) {
1025 size = min(size, (size_t) hwrec_status_size - (size_t) *f_pos);
1027 page = (*f_pos) / PAGE_SIZE;
1028 offset = (*f_pos) & ~PAGE_MASK;
1030 size = min(size, (size_t) PAGE_SIZE - offset);
1032 if (copy_to_user(buf,
1033 ((u8 *) hwrec_status_pages[page]) + offset,
1035 mutex_unlock(hwrec_mutex);
1041 mutex_unlock(hwrec_mutex);
1047 static const struct file_operations hwrec_status_fops = {
1048 .owner = THIS_MODULE,
1049 .llseek = hwrec_status_llseek,
1050 .read = hwrec_status_read,
1051 .open = hwrec_file_open,
1052 .release = hwrec_file_release,
/*
 * Create the pvr/ debugfs directory and all files beneath it. On any file
 * creation failure the whole tree is removed (plain debugfs_remove() is
 * enough for the first file since only the empty directory exists at that
 * point; later failures use debugfs_remove_recursive()).
 * NOTE(review): the error-return statements, closing braces and the
 * hwrec_mem/hwrec_edm fops arguments are on lines missing from this
 * extraction.
 */
1058 int pvr_debugfs_init(void)
1060 mutex_init(hwrec_mutex);
1062 pvr_debugfs_dir = debugfs_create_dir("pvr", NULL);
1063 if (!pvr_debugfs_dir)
1066 if (!debugfs_create_file("reset_sgx", S_IWUSR, pvr_debugfs_dir,
1067 &pvr_reset, &pvr_debugfs_fops)) {
1068 debugfs_remove(pvr_debugfs_dir);
1072 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1073 if (!debugfs_create_file("edm_trace", S_IRUGO, pvr_debugfs_dir, NULL,
1074 &pvr_debugfs_edm_fops)) {
1075 debugfs_remove_recursive(pvr_debugfs_dir);
1080 if (!debugfs_create_file("hwrec_event", S_IRUSR, pvr_debugfs_dir, NULL,
1081 &hwrec_event_fops)) {
1082 debugfs_remove_recursive(pvr_debugfs_dir);
1086 if (!debugfs_create_file("hwrec_time", S_IRUSR, pvr_debugfs_dir, NULL,
1087 &hwrec_time_fops)) {
1088 debugfs_remove_recursive(pvr_debugfs_dir);
1092 if (!debugfs_create_file("hwrec_regs", S_IRUSR, pvr_debugfs_dir, NULL,
1093 &hwrec_regs_fops)) {
1094 debugfs_remove_recursive(pvr_debugfs_dir);
1098 #ifdef CONFIG_PVR_DEBUG
1099 if (!debugfs_create_file("hwrec_mem", S_IRUSR, pvr_debugfs_dir, NULL,
1101 debugfs_remove_recursive(pvr_debugfs_dir);
1104 #endif /* CONFIG_PVR_DEBUG */
1106 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1107 if (!debugfs_create_file("hwrec_edm", S_IRUSR, pvr_debugfs_dir, NULL,
1109 debugfs_remove_recursive(pvr_debugfs_dir);
1114 if (!debugfs_create_file("hwrec_status", S_IRUSR, pvr_debugfs_dir, NULL,
1115 &hwrec_status_fops)) {
1116 debugfs_remove_recursive(pvr_debugfs_dir);
/*
 * Tear down the debugfs tree and free every dump buffer: the register
 * snapshot page, the (debug-only) memory dump pages, the EDM snapshot and
 * the render-status pages.
 * NOTE(review): free_page() takes the page's address -- casting the
 * pointer through (u32) truncates on 64-bit; should be (unsigned long).
 * Harmless on the 32-bit OMAP targets this driver shipped on, but worth
 * fixing. Some cleanup calls are on lines missing from this extraction.
 */
1123 void pvr_debugfs_cleanup(void)
1125 debugfs_remove_recursive(pvr_debugfs_dir);
1127 if (hwrec_registers)
1128 free_page((u32) hwrec_registers);
1130 #ifdef CONFIG_PVR_DEBUG
1132 #endif /* CONFIG_PVR_DEBUG */
1134 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1136 pvr_edm_buffer_destroy(hwrec_edm_buf);
1139 hwrec_status_free();