2 * Copyright (c) 2010-2011 Imre Deak <imre.deak@nokia.com>
3 * Copyright (c) 2010-2011 Luc Verhaegen <libv@codethink.co.uk>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * Debugfs interface living in pvr/ subdirectory.
25 #include <linux/kernel.h>
26 #include <linux/debugfs.h>
27 #include <linux/vmalloc.h>
28 #include <linux/mutex.h>
29 #include <linux/uaccess.h>
32 #include "img_types.h"
33 #include "servicesext.h"
35 #include "sgxinfokm.h"
36 #include "syscommon.h"
37 #include "pvr_bridge_km.h"
39 #include "pvr_debugfs.h"
41 #include "bridged_support.h"
43 #include "pvr_trace_cmd.h"
/* Root "pvr/" debugfs directory; all files below are created inside it. */
45 struct dentry *pvr_debugfs_dir;
/*
 * Find the SGX device node on the global device list.
 * Walks sysdata->psDeviceNodeList for a PVRSRV_DEVICE_TYPE_SGX entry.
 * NOTE(review): source view is partial here -- the return paths (NULL on
 * SysAcquireData failure / the matching node) are not visible; confirm
 * against the full file.
 */
51 static struct PVRSRV_DEVICE_NODE *get_sgx_node(void)
53 struct SYS_DATA *sysdata;
54 struct PVRSRV_DEVICE_NODE *node;
56 if (SysAcquireData(&sysdata) != PVRSRV_OK)
59 for (node = sysdata->psDeviceNodeList; node; node = node->psNext)
60 if (node->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
/*
 * debugfs write handler: force an SGX hardware-recovery reset.
 * Powers the device up to D0, runs HWRecoveryResetSGX() and then lets the
 * active-power-event logic re-evaluate the power state.
 * NOTE(review): several lines (early-exit paths, locking, return values)
 * are missing from this view -- verify error handling in the full source.
 */
66 static int pvr_debugfs_reset(void *data, u64 val)
68 struct PVRSRV_DEVICE_NODE *node;
69 enum PVRSRV_ERROR err;
/* Bail out when the driver has been administratively disabled. */
77 if (pvr_is_disabled()) {
82 node = get_sgx_node();
/* The device must be powered before it can be reset. */
88 err = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
89 PVRSRV_POWER_STATE_D0);
90 if (err != PVRSRV_OK) {
95 HWRecoveryResetSGX(node, __func__);
/* Allow the power manager to drop the device back to idle if possible. */
97 SGXTestActivePowerEvent(node);
/*
 * Dispatch for the simple-attribute "set" operation: currently only the
 * reset_sgx file (backed by pvr_reset) is handled.
 */
104 static int pvr_debugfs_set(void *data, u64 val)
108 if (var == &pvr_reset)
109 return pvr_debugfs_reset(data, val);
/* Write-only attribute (no read callback), value parsed as "%llu". */
114 DEFINE_SIMPLE_ATTRIBUTE(pvr_debugfs_fops, NULL, pvr_debugfs_set, "%llu\n");
116 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Number of 4-word entries in the microkernel (EDM) status trace buffer. */
120 #define SGXMK_TRACE_BUFFER_SIZE 512
/* Upper bound on the formatted length of one trace line. */
121 #define SGXMK_TRACE_BUF_STR_LEN 80
/* Snapshot of a formatted EDM trace (len + flexible data; fields partly
 * outside this view). */
123 struct edm_buf_info {
/*
 * Format the EDM status buffer either into dst (when non-NULL, up to
 * dst_len bytes) or to the kernel log via printk.  Returns the number of
 * bytes written to dst, clamped to dst_len.
 * NOTE(review): the dst==NULL branch selection lines are missing from this
 * view; the paired scnprintf/printk lines suggest both output paths exist.
 */
129 edm_trace_print(struct PVRSRV_SGXDEV_INFO *sdev, char *dst, size_t dst_len)
/* Nothing to print if the status buffer was never allocated. */
138 if (!sdev->psKernelEDMStatusBufferMemInfo)
141 buf = sdev->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
144 p += scnprintf(dst + p, dst_len - p,
145 "Last SGX microkernel status code: 0x%x\n", *buf);
147 printk(KERN_DEBUG "Last SGX microkernel status code: 0x%x\n",
/* Each trace entry is 4 32-bit words. */
154 buf_end = buf + SGXMK_TRACE_BUFFER_SIZE * 4;
158 /* Dump the status values */
159 for (i = 0; i < SGXMK_TRACE_BUFFER_SIZE; i++) {
/* Words are printed in 2,3,1,0 order -- presumably the hardware's field
 * layout; confirm against the microkernel trace format. */
161 p += scnprintf(dst + p, dst_len - p,
162 "%3d %08X %08X %08X %08X\n",
163 i, buf[2], buf[3], buf[1], buf[0]);
165 printk(KERN_DEBUG "%3d %08X %08X %08X %08X\n",
166 i, buf[2], buf[3], buf[1], buf[0]);
/* Clamp in case the formatted output hit the dst_len limit. */
172 return p > dst_len ? dst_len : p;
/*
 * Allocate an edm_buf_info large enough for a worst-case formatted trace
 * and fill it with a snapshot of the current EDM trace.  Returns NULL on
 * allocation failure (error path lines not visible here).  Caller owns the
 * buffer and frees it with pvr_edm_buffer_destroy().
 */
175 static struct edm_buf_info *
176 pvr_edm_buffer_create(struct PVRSRV_SGXDEV_INFO *sgx_info)
178 struct edm_buf_info *bi;
181 /* Take a snapshot of the EDM trace buffer */
182 size = SGXMK_TRACE_BUFFER_SIZE * SGXMK_TRACE_BUF_STR_LEN;
/* vmalloc: the snapshot can be tens of KB, too big for kmalloc comfort. */
183 bi = vmalloc(sizeof(*bi) + size);
185 pr_err("%s: vmalloc failed!\n", __func__);
189 bi->len = edm_trace_print(sgx_info, bi->data, size);
/* Release a snapshot created by pvr_edm_buffer_create(). */
195 pvr_edm_buffer_destroy(struct edm_buf_info *edm)
/*
 * open(): snapshot the EDM trace for this reader; the snapshot lives in
 * file->private_data for the lifetime of the open file.
 */
200 static int pvr_debugfs_edm_open(struct inode *inode, struct file *file)
202 struct PVRSRV_DEVICE_NODE *node;
204 node = get_sgx_node();
206 file->private_data = pvr_edm_buffer_create(node->pvDevice);
207 if (!file->private_data)
/* release(): drop the per-open snapshot. */
213 static int pvr_debugfs_edm_release(struct inode *inode, struct file *file)
215 pvr_edm_buffer_destroy(file->private_data);
/* read(): serve the snapshot taken at open() time. */
220 static ssize_t pvr_debugfs_edm_read(struct file *file, char __user *buffer,
221 size_t count, loff_t *ppos)
223 struct edm_buf_info *bi = file->private_data;
225 return simple_read_from_buffer(buffer, count, ppos, bi->data, bi->len);
228 static const struct file_operations pvr_debugfs_edm_fops = {
229 .owner = THIS_MODULE,
230 .open = pvr_debugfs_edm_open,
231 .read = pvr_debugfs_edm_read,
232 .release = pvr_debugfs_edm_release,
234 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
236 #ifdef CONFIG_PVR_TRACE_CMD
/*
 * Command-trace debugfs file state.  One shared snapshot + string buffer
 * for all readers; trcmd_open_cnt presumably serializes/limits concurrent
 * opens (the open-count checks are not visible in this view).
 */
238 static void *trcmd_str_buf;
239 static u8 *trcmd_snapshot;
240 static size_t trcmd_snapshot_size;
241 static int trcmd_open_cnt;
/* open(): allocate the formatting buffer and snapshot the command trace. */
243 static int pvr_dbg_trcmd_open(struct inode *inode, struct file *file)
252 trcmd_str_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
253 if (!trcmd_str_buf) {
261 r = pvr_trcmd_create_snapshot(&trcmd_snapshot, &trcmd_snapshot_size);
/* Snapshot failed: undo the buffer allocation. */
264 kfree(trcmd_str_buf);
/* release(): free the snapshot and the formatting buffer. */
275 static int pvr_dbg_trcmd_release(struct inode *inode, struct file *file)
277 pvr_trcmd_destroy_snapshot(trcmd_snapshot);
278 kfree(trcmd_str_buf);
/*
 * read(): format up to PAGE_SIZE of the snapshot into trcmd_str_buf and
 * copy it to userspace.
 * NOTE(review): max_t(size_t, PAGE_SIZE, count) *grows* the limit past
 * PAGE_SIZE when count > PAGE_SIZE, but trcmd_str_buf is only PAGE_SIZE
 * bytes -- looks like it should be min_t; confirm pvr_trcmd_print()'s
 * contract before changing.
 */
284 static ssize_t pvr_dbg_trcmd_read(struct file *file, char __user *buffer,
285 size_t count, loff_t *ppos)
289 ret = pvr_trcmd_print(trcmd_str_buf, max_t(size_t, PAGE_SIZE, count),
290 trcmd_snapshot, trcmd_snapshot_size, ppos);
291 if (copy_to_user(buffer, trcmd_str_buf, ret))
297 static const struct file_operations pvr_dbg_trcmd_fops = {
298 .owner = THIS_MODULE,
299 .open = pvr_dbg_trcmd_open,
300 .release = pvr_dbg_trcmd_release,
301 .read = pvr_dbg_trcmd_read,
307 * HW Recovery dumping support.
/* Serializes all access to the hwrec_* dump state below. */
310 static struct mutex hwrec_mutex[1];
/* Timestamp of the most recent HW recovery dump (exposed via hwrec_time). */
311 static struct timeval hwrec_time;
/* Number of currently-open hwrec data files; a new dump is refused while
 * readers hold the previous one open. */
312 static int hwrec_open_count;
/* Wakes hwrec_event readers when a new dump is generated. */
313 static DECLARE_WAIT_QUEUE_HEAD(hwrec_wait_queue);
314 static int hwrec_event;
316 /* add extra locking to keep us from overwriting things during dumping. */
317 static int hwrec_event_open_count;
318 static int hwrec_event_file_lock;
320 /* While these could get moved into PVRSRV_SGXDEV_INFO, the more future-proof
321 * way of handling hw recovery events is by providing 1 single hwrecovery dump
322 * at a time, and adding a hwrec_info debugfs file with: process information,
323 * general driver information, and the instance of the (then multicore) pvr
324 * where the hwrec event happened.
/* One page holding a snapshot of 1024 SGX registers (see
 * hwrec_registers_dump). */
326 static u32 *hwrec_registers;
328 #ifdef CONFIG_PVR_DEBUG
329 static size_t hwrec_mem_size;
/* NOTE(review): used below as an *element count* for hwrec_mem_pages, yet
 * defined as 4 * PAGE_SIZE (= 16384 entries, i.e. up to 64MB of dump
 * capacity).  Verify whether "4 * PAGE_SIZE pages" is intentional or the
 * name/value are mismatched. */
330 #define HWREC_MEM_PAGES (4 * PAGE_SIZE)
331 static unsigned long hwrec_mem_pages[HWREC_MEM_PAGES];
332 #endif /* CONFIG_PVR_DEBUG */
334 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* EDM trace snapshot captured at HW-recovery time (hwrec_edm file). */
335 static struct edm_buf_info *hwrec_edm_buf;
/*
 * Snapshot the first 1024 SGX registers (4KB of register space) into a
 * lazily-allocated page, for later readout through the hwrec_regs file.
 */
339 hwrec_registers_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
343 if (!hwrec_registers) {
344 hwrec_registers = (u32 *) __get_free_page(GFP_KERNEL);
345 if (!hwrec_registers) {
346 pr_err("%s: failed to get free page.\n", __func__);
351 for (i = 0; i < 1024; i++)
352 hwrec_registers[i] = readl(psDevInfo->pvRegsBaseKM + 4 * i);
/*
 * Free every page of a page-array dump and reset its size.
 * (Free loop body and *size reset are outside this view.)
 */
356 hwrec_pages_free(size_t *size, u32 *pages)
363 for (i = 0; (i * PAGE_SIZE) < *size; i++) {
/*
 * Append `size` bytes to a page-array dump, allocating a fresh page
 * whenever the write reaches a page boundary.  Writes at most one
 * page-remainder per call; advances *current_size by the amount written.
 * NOTE(review): the loop/return structure around these lines is not
 * visible -- presumably the caller or an outer loop handles writes that
 * span pages.
 */
372 hwrec_pages_write(u8 *buffer, size_t size, size_t *current_size, u32 *pages,
/* Offset within the current (possibly partial) page. */
379 size_t offset = *current_size & ~PAGE_MASK;
380 int page = *current_size / PAGE_SIZE;
/* Refuse to write past the fixed page-pointer array. */
383 if (((*current_size) / PAGE_SIZE) >= array_size) {
384 pr_err("%s: Size overrun!\n", __func__);
388 pages[page] = __get_free_page(GFP_KERNEL);
390 pr_err("%s: failed to get free page.\n",
/* Clamp the copy to the end of the current page. */
396 if (count > (PAGE_SIZE - offset))
397 count = PAGE_SIZE - offset;
399 memcpy(((u8 *) pages[page]) + offset, buffer, count);
404 *current_size += count;
410 #ifdef CONFIG_PVR_DEBUG
/* Free the full-memory dump pages. */
414 hwrec_pages_free(&hwrec_mem_size, hwrec_mem_pages);
/* Append raw bytes to the full-memory dump. */
418 hwrec_mem_write(u8 *buffer, size_t size)
420 return hwrec_pages_write(buffer, size, &hwrec_mem_size,
421 hwrec_mem_pages, ARRAY_SIZE(hwrec_mem_pages));
/* printf-style append into the full-memory dump (formatted via a local
 * temporary, then written with hwrec_mem_write). */
425 hwrec_mem_print(char *format, ...)
431 va_start(ap, format);
432 size = vscnprintf(tmp, sizeof(tmp), format, ap);
435 return hwrec_mem_write(tmp, size);
437 #endif /* CONFIG_PVR_DEBUG */
440 * Render status buffer dumping.
/* Page-array dump for the TA/3D render status buffers (up to 1024 pages). */
442 static size_t hwrec_status_size;
443 static u32 hwrec_status_pages[1024];
/* Append raw bytes to the render-status dump. */
446 hwrec_status_write(char *buffer, size_t size)
448 return hwrec_pages_write(buffer, size, &hwrec_status_size,
450 ARRAY_SIZE(hwrec_status_pages));
/* Free the render-status dump pages. */
454 hwrec_status_free(void)
456 hwrec_pages_free(&hwrec_status_size, hwrec_status_pages);
/* printf-style append into the render-status dump. */
460 hwrec_status_print(char *format, ...)
466 va_start(ap, format);
467 size = vscnprintf(tmp, sizeof(tmp), format, ap);
470 return hwrec_status_write(tmp, size);
/* Flag OR-ed into a buffer descriptor's type when two lists disagree about
 * the same buf_id (descriptor considered corrupt). */
473 #define BUF_DESC_CORRUPT (1 << 31)
/*
 * Merge src's buffer descriptors into dst, skipping duplicates.  A buf_id
 * present in both lists but with differing contents gets its dst entry
 * marked BUF_DESC_CORRUPT.
 */
475 static void add_uniq_items(struct render_state_buf_list *dst,
476 const struct render_state_buf_list *src)
480 for (i = 0; i < src->cnt; i++) {
481 const struct render_state_buf_info *sbinf = &src->info[i];
/* Look for an existing entry with the same id. */
484 for (j = 0; j < dst->cnt; j++) {
485 if (sbinf->buf_id == dst->info[j].buf_id) {
486 if (memcmp(sbinf, &dst->info[j],
488 dst->info[j].type |= BUF_DESC_CORRUPT;
493 /* Bound for cnt is guaranteed by the caller */
494 dst->info[dst->cnt] = *sbinf;
/*
 * Build one deduplicated list out of set_size source lists (NULL entries
 * are tolerated).  Returns a kmalloc'd list the caller must kfree, or
 * NULL on allocation failure (not visible in this view).
 */
500 static struct render_state_buf_list *create_merged_uniq_list(
501 struct render_state_buf_list **bl_set, int set_size)
504 struct render_state_buf_list *dbl;
508 * Create a buf list big enough to contain all elements from each
/* Header size only; per-list info[] capacity is added below. */
511 size = offsetof(struct render_state_buf_list, info[0]);
512 for (i = 0; i < set_size; i++) {
515 size += bl_set[i]->cnt * sizeof(bl_set[i]->info[0]);
519 dbl = kmalloc(size, GFP_KERNEL);
524 for (i = 0; i < set_size; i++) {
526 add_uniq_items(dbl, bl_set[i]);
/*
 * Map a userspace-owned buffer (identified by a per-process handle) into
 * kernel virtual space for dumping.  If the mem-info already has a kernel
 * linear address, that is returned directly (no mapping is created);
 * otherwise the backing pages are gathered by PFN and vmap'd.
 * Caller must release with vunmap_buf() using the same handle.
 * NOTE(review): error paths (kmalloc/vmap failure, the offset & PAGE_MASK
 * early check's return) are missing from this view.
 */
532 static void *vmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
533 u32 handle, off_t offset, size_t size)
535 struct PVRSRV_KERNEL_MEM_INFO *minfo;
536 struct LinuxMemArea *mem_area;
537 enum PVRSRV_ERROR err;
/* Presumably rejects non-page-aligned offsets -- the rejected-path body
 * is not visible; confirm. */
545 if (offset & PAGE_MASK)
548 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
549 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
550 if (err != PVRSRV_OK)
/* Fast path: buffer already kernel-mapped. */
552 if (minfo->pvLinAddrKM)
553 return minfo->pvLinAddrKM;
555 err = PVRSRVLookupOSMemHandle(proc->psHandleBase, (void *)&mem_area,
557 if (err != PVRSRV_OK)
/* Page-align the requested window and map it page by page. */
560 start_ofs = offset & PAGE_MASK;
561 end_ofs = PAGE_ALIGN(offset + size);
562 pg_cnt = (end_ofs - start_ofs) >> PAGE_SHIFT;
563 pages = kmalloc(pg_cnt * sizeof(pages[0]), GFP_KERNEL);
566 for (i = 0; i < pg_cnt; i++) {
569 pfn = LinuxMemAreaToCpuPFN(mem_area, start_ofs);
572 pages[i] = pfn_to_page(pfn);
573 start_ofs += PAGE_SIZE;
575 map = vmap(pages, pg_cnt, VM_MAP, PAGE_KERNEL);
/*
 * Undo vmap_buf(): a buffer that was already kernel-mapped needs no
 * unmapping; otherwise vunmap the page-aligned mapping base.
 */
583 static void vunmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
584 u32 handle, void *map)
586 struct PVRSRV_KERNEL_MEM_INFO *minfo;
587 enum PVRSRV_ERROR err;
589 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
590 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
591 if (err != PVRSRV_OK)
593 if (minfo->pvLinAddrKM)
/* map may point into the middle of the first page; vunmap wants the
 * page-aligned base. */
595 vunmap((void *)(((unsigned long)map) & PAGE_MASK));
/*
 * Emit one buffer into the render-status dump: a "<type ... size ...>"
 * header line followed by the raw contents.  Corrupt descriptors are
 * annotated and the flag stripped before printing.
 */
598 static void dump_buf(void *start, size_t size, u32 type)
602 if (type & BUF_DESC_CORRUPT) {
603 type &= ~BUF_DESC_CORRUPT;
606 hwrec_status_print("<type %d%s size %d>\n", type, corr, size);
607 hwrec_status_write(start, size);
/*
 * Resolve a handle + offset to a render_state_buf_list living inside a
 * kernel-mapped container allocation.  Validates that the list header fits
 * in the allocation and that its element count is within info[]'s bounds;
 * returns NULL on any failure (early-return bodies not visible here).
 */
610 static struct render_state_buf_list *get_state_buf_list(
611 struct PVRSRV_PER_PROCESS_DATA *proc,
612 u32 handle, off_t offset)
614 struct PVRSRV_KERNEL_MEM_INFO *container;
615 struct render_state_buf_list *buf;
616 enum PVRSRV_ERROR err;
618 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&container,
619 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
620 if (err != PVRSRV_OK)
622 if (!container->pvLinAddrKM)
/* The list header itself must fit inside the container allocation. */
624 if (offset + sizeof(*buf) > container->ui32AllocSize)
627 buf = container->pvLinAddrKM + offset;
/* Reject a count that would index past the fixed info[] array. */
629 if (buf->cnt > ARRAY_SIZE(buf->info))
/*
 * Map and dump every buffer described in a render-state buffer list.
 */
635 static void dump_state_buf_list(struct PVRSRV_PER_PROCESS_DATA *proc,
636 struct render_state_buf_list *bl)
643 pr_info("Dumping %d render state buffers\n", bl->cnt);
644 for (i = 0; i < bl->cnt; i++) {
645 struct render_state_buf_info *binfo;
648 binfo = &bl->info[i];
650 map = vmap_buf(proc, binfo->buf_id, binfo->offset, binfo->size);
653 dump_buf(map, binfo->size, binfo->type);
655 vunmap_buf(proc, binfo->buf_id, map);
/*
 * Dump the render-state buffers referenced by the SGX host control block:
 * reads the TA and 3D buffer-list handles from hardware-shared memory,
 * resolves each list, merges them (deduplicated) and dumps the result.
 */
659 static void dump_sgx_state_bufs(struct PVRSRV_PER_PROCESS_DATA *proc,
660 struct PVRSRV_SGXDEV_INFO *dev_info)
662 struct SGXMKIF_HOST_CTL __iomem *hctl = dev_info->psSGXHostCtl;
663 struct render_state_buf_list *bl_set[2] = { NULL };
664 struct render_state_buf_list *mbl;
/* Handles live in device-shared memory: use readl, not plain loads. */
671 handle_ta = readl(&hctl->render_state_buf_ta_handle);
672 handle_3d = readl(&hctl->render_state_buf_3d_handle);
673 bl_set[0] = get_state_buf_list(proc, handle_ta,
674 dev_info->state_buf_ofs);
676 * The two buf list can be the same if the TA and 3D phases used the
677 * same context at the time of the HWrec. In this case just ignore
680 if (handle_ta != handle_3d)
681 bl_set[1] = get_state_buf_list(proc, handle_3d,
682 dev_info->state_buf_ofs);
683 mbl = create_merged_uniq_list(bl_set, ARRAY_SIZE(bl_set));
687 dump_state_buf_list(proc, mbl);
/*
 * Entry point called on a HW recovery event: capture registers, memory
 * (CONFIG_PVR_DEBUG), the EDM trace and the render-state buffers, then
 * wake any hwrec_event reader.  Refuses to overwrite a dump that a
 * userspace reader still holds open/locked.
 */
692 pvr_hwrec_dump(struct PVRSRV_PER_PROCESS_DATA *proc_data,
693 struct PVRSRV_SGXDEV_INFO *psDevInfo)
695 mutex_lock(hwrec_mutex);
697 if (hwrec_open_count || hwrec_event_file_lock) {
698 pr_err("%s: previous hwrec dump is still locked!\n", __func__);
699 mutex_unlock(hwrec_mutex);
/* Timestamp identifies this dump for the hwrec_time file. */
703 do_gettimeofday(&hwrec_time);
704 pr_info("HW Recovery dump generated at %010ld%06ld\n",
705 hwrec_time.tv_sec, hwrec_time.tv_usec);
707 hwrec_registers_dump(psDevInfo);
709 #ifdef CONFIG_PVR_DEBUG
711 mmu_hwrec_mem_dump(psDevInfo);
712 #endif /* CONFIG_PVR_DEBUG */
714 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Replace any stale EDM snapshot from a previous dump. */
716 pvr_edm_buffer_destroy(hwrec_edm_buf);
717 hwrec_edm_buf = pvr_edm_buffer_create(psDevInfo);
721 dump_sgx_state_bufs(proc_data, psDevInfo);
725 mutex_unlock(hwrec_mutex);
/* Let a blocked hwrec_event reader know a dump is ready. */
727 wake_up_interruptible(&hwrec_wait_queue);
/*
 * Shared open/release for the hwrec data files: bumps/drops
 * hwrec_open_count under the mutex so a new dump cannot overwrite data a
 * reader still has open (count manipulation lines not visible here).
 */
734 hwrec_file_open(struct inode *inode, struct file *filp)
736 mutex_lock(hwrec_mutex);
740 mutex_unlock(hwrec_mutex);
745 hwrec_file_release(struct inode *inode, struct file *filp)
747 mutex_lock(hwrec_mutex);
751 mutex_unlock(hwrec_mutex);
/*
 * Common llseek logic for the hwrec files: validates SEEK_SET/SEEK_CUR/
 * SEEK_END offsets against [0, max] and computes the new position.
 * (whence dispatch lines are outside this view.)
 */
756 hwrec_llseek_helper(struct file *filp, loff_t offset, int whence, loff_t max)
/* SEEK_SET: absolute offset must stay within the dump. */
762 if ((offset > max) || (offset < 0))
/* SEEK_CUR: relative offset must keep f_pos within the dump. */
768 if (((filp->f_pos + offset) > max) ||
769 ((filp->f_pos + offset) < 0))
772 f_pos = filp->f_pos + offset;
/* SEEK_END: offset is relative to the dump's end. */
779 f_pos = max + offset;
793 * Provides a hwrec timestamp for unique dumping.
/* read(): format the dump timestamp as a fixed-width decimal string. */
796 hwrec_time_read(struct file *filp, char __user *buf, size_t size,
801 mutex_lock(hwrec_mutex);
802 snprintf(tmp, sizeof(tmp), "%010ld%06ld",
803 hwrec_time.tv_sec, hwrec_time.tv_usec);
804 mutex_unlock(hwrec_mutex);
806 return simple_read_from_buffer(buf, size, f_pos, tmp, strlen(tmp));
809 static const struct file_operations hwrec_time_fops = {
810 .owner = THIS_MODULE,
812 .read = hwrec_time_read,
813 .open = hwrec_file_open,
814 .release = hwrec_file_release,
818 * Blocks the reader until a HWRec happens.
/* open(): allow only a single hwrec_event reader at a time. */
821 hwrec_event_open(struct inode *inode, struct file *filp)
825 mutex_lock(hwrec_mutex);
827 if (hwrec_event_open_count)
830 hwrec_event_open_count++;
834 mutex_unlock(hwrec_mutex);
840 hwrec_event_release(struct inode *inode, struct file *filp)
842 mutex_lock(hwrec_mutex);
844 hwrec_event_open_count--;
846 mutex_unlock(hwrec_mutex);
/*
 * read(): release the previous dump's lock, block until the next HW
 * recovery event fires, then re-lock the dump so pvr_hwrec_dump() cannot
 * overwrite it while userspace retrieves it.
 */
853 hwrec_event_read(struct file *filp, char __user *buf, size_t size,
858 mutex_lock(hwrec_mutex);
/* Reader is done with the previous dump; allow a new one. */
860 hwrec_event_file_lock = 0;
862 mutex_unlock(hwrec_mutex);
/* Sleep (interruptibly) until pvr_hwrec_dump() signals hwrec_event. */
864 ret = wait_event_interruptible(hwrec_wait_queue, hwrec_event);
866 mutex_lock(hwrec_mutex);
/* Hold the fresh dump until the reader comes back. */
869 hwrec_event_file_lock = 1;
871 mutex_unlock(hwrec_mutex);
877 static const struct file_operations hwrec_event_fops = {
878 .owner = THIS_MODULE,
880 .read = hwrec_event_read,
881 .open = hwrec_event_open,
882 .release = hwrec_event_release,
886 * Reads out all readable registers.
/* One formatted line per register: "0xOFF 0xVALUE\n" = 17 chars. */
888 #define HWREC_REGS_LINE_SIZE 17
891 hwrec_regs_llseek(struct file *filp, loff_t offset, int whence)
895 mutex_lock(hwrec_mutex);
/* 1024 registers, one fixed-size text line each. */
898 f_pos = hwrec_llseek_helper(filp, offset, whence,
899 1024 * HWREC_REGS_LINE_SIZE);
903 mutex_unlock(hwrec_mutex);
/*
 * read(): reproduce the register snapshot as fixed-width text lines.
 * Requires the caller's buffer to hold at least one full line; serves the
 * line containing *f_pos.
 */
909 hwrec_regs_read(struct file *filp, char __user *buf, size_t size,
912 char tmp[HWREC_REGS_LINE_SIZE + 1];
915 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
/* Which register line does the current file position fall in? */
918 i = ((int) *f_pos) / (sizeof(tmp) - 1);
922 mutex_lock(hwrec_mutex);
/* No dump captured yet. */
924 if (!hwrec_registers)
927 size = snprintf(tmp, sizeof(tmp), "0x%03X 0x%08X\n", i * 4,
930 mutex_unlock(hwrec_mutex);
/* Copy from the byte inside the line that *f_pos points at. */
933 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
943 static const struct file_operations hwrec_regs_fops = {
944 .owner = THIS_MODULE,
945 .llseek = hwrec_regs_llseek,
946 .read = hwrec_regs_read,
947 .open = hwrec_file_open,
948 .release = hwrec_file_release,
951 #ifdef CONFIG_PVR_DEBUG
953 * Provides a full context dump: page directory, page tables, and all mapped
/* llseek over the page-array memory dump (bounded by hwrec_mem_size;
 * the max argument line is outside this view). */
957 hwrec_mem_llseek(struct file *filp, loff_t offset, int whence)
961 mutex_lock(hwrec_mutex);
964 f_pos = hwrec_llseek_helper(filp, offset, whence,
969 mutex_unlock(hwrec_mutex);
/*
 * read(): serve bytes out of the hwrec_mem_pages page array.  Each call
 * returns at most the remainder of the page containing *f_pos.
 */
975 hwrec_mem_read(struct file *filp, char __user *buf, size_t size,
978 mutex_lock(hwrec_mutex);
980 if ((*f_pos >= 0) && (*f_pos < hwrec_mem_size)) {
/* Clamp to the end of the dump... */
983 size = min(size, (size_t) hwrec_mem_size - (size_t) *f_pos);
985 page = (*f_pos) / PAGE_SIZE;
986 offset = (*f_pos) & ~PAGE_MASK;
/* ...and to the end of the current page. */
988 size = min(size, (size_t) PAGE_SIZE - offset);
990 if (copy_to_user(buf,
991 ((u8 *) hwrec_mem_pages[page]) + offset,
993 mutex_unlock(hwrec_mutex);
999 mutex_unlock(hwrec_mutex);
1005 static const struct file_operations hwrec_mem_fops = {
1006 .owner = THIS_MODULE,
1007 .llseek = hwrec_mem_llseek,
1008 .read = hwrec_mem_read,
1009 .open = hwrec_file_open,
1010 .release = hwrec_file_release,
1012 #endif /* CONFIG_PVR_DEBUG */
1015 * Read out edm trace created before HW recovery reset.
1017 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* llseek over the EDM snapshot captured by pvr_hwrec_dump(). */
1019 hwrec_edm_llseek(struct file *filp, loff_t offset, int whence)
1023 mutex_lock(hwrec_mutex);
1026 f_pos = hwrec_llseek_helper(filp, offset, whence,
1027 hwrec_edm_buf->len);
1031 mutex_unlock(hwrec_mutex);
/* read(): serve the snapshot under the mutex so a concurrent dump cannot
 * swap the buffer mid-read. */
1037 hwrec_edm_read(struct file *filp, char __user *buf, size_t size,
1042 mutex_lock(hwrec_mutex);
1045 ret = simple_read_from_buffer(buf, size, f_pos,
1046 hwrec_edm_buf->data,
1047 hwrec_edm_buf->len);
1051 mutex_unlock(hwrec_mutex);
1056 static const struct file_operations hwrec_edm_fops = {
1057 .owner = THIS_MODULE,
1058 .llseek = hwrec_edm_llseek,
1059 .read = hwrec_edm_read,
1060 .open = hwrec_file_open,
1061 .release = hwrec_file_release,
1063 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
1066 * Provides a dump of the TA and 3D status buffers.
/* llseek over the render-status dump; only valid once a dump exists. */
1069 hwrec_status_llseek(struct file *filp, loff_t offset, int whence)
1073 mutex_lock(hwrec_mutex);
1075 if (hwrec_status_size)
1076 f_pos = hwrec_llseek_helper(filp, offset, whence,
1081 mutex_unlock(hwrec_mutex);
/*
 * read(): serve bytes from the hwrec_status_pages page array; same
 * page-at-a-time pattern as hwrec_mem_read().
 */
1087 hwrec_status_read(struct file *filp, char __user *buf, size_t size,
1090 mutex_lock(hwrec_mutex);
1092 if ((*f_pos >= 0) && (*f_pos < hwrec_status_size)) {
/* Clamp to dump end, then to the current page's end. */
1095 size = min(size, (size_t) hwrec_status_size - (size_t) *f_pos);
1097 page = (*f_pos) / PAGE_SIZE;
1098 offset = (*f_pos) & ~PAGE_MASK;
1100 size = min(size, (size_t) PAGE_SIZE - offset);
1102 if (copy_to_user(buf,
1103 ((u8 *) hwrec_status_pages[page]) + offset,
1105 mutex_unlock(hwrec_mutex);
1111 mutex_unlock(hwrec_mutex);
1117 static const struct file_operations hwrec_status_fops = {
1118 .owner = THIS_MODULE,
1119 .llseek = hwrec_status_llseek,
1120 .read = hwrec_status_read,
1121 .open = hwrec_file_open,
1122 .release = hwrec_file_release,
/*
 * Create the pvr/ debugfs directory and all entries.  On any creation
 * failure the whole tree is removed and an error returned (return lines
 * not visible in this view).
 * NOTE(review): NULL-checking debugfs_create_file() is the old-kernel
 * convention; modern kernels return ERR_PTR on failure -- fine for the
 * kernel this driver targets, but worth confirming.
 */
1128 int pvr_debugfs_init(void)
1130 mutex_init(hwrec_mutex);
1132 pvr_debugfs_dir = debugfs_create_dir("pvr", NULL);
1133 if (!pvr_debugfs_dir)
/* Write-only reset trigger, backed by pvr_reset via pvr_debugfs_set(). */
1136 if (!debugfs_create_file("reset_sgx", S_IWUSR, pvr_debugfs_dir,
1137 &pvr_reset, &pvr_debugfs_fops)) {
1138 debugfs_remove(pvr_debugfs_dir);
1142 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1143 if (!debugfs_create_file("edm_trace", S_IRUGO, pvr_debugfs_dir, NULL,
1144 &pvr_debugfs_edm_fops)) {
1145 debugfs_remove_recursive(pvr_debugfs_dir);
1149 #ifdef CONFIG_PVR_TRACE_CMD
1150 if (!debugfs_create_file("command_trace", S_IRUGO, pvr_debugfs_dir,
1151 NULL, &pvr_dbg_trcmd_fops)) {
1152 debugfs_remove_recursive(pvr_debugfs_dir);
/* HW recovery dump files: owner-read-only. */
1156 if (!debugfs_create_file("hwrec_event", S_IRUSR, pvr_debugfs_dir, NULL,
1157 &hwrec_event_fops)) {
1158 debugfs_remove_recursive(pvr_debugfs_dir);
1162 if (!debugfs_create_file("hwrec_time", S_IRUSR, pvr_debugfs_dir, NULL,
1163 &hwrec_time_fops)) {
1164 debugfs_remove_recursive(pvr_debugfs_dir);
1168 if (!debugfs_create_file("hwrec_regs", S_IRUSR, pvr_debugfs_dir, NULL,
1169 &hwrec_regs_fops)) {
1170 debugfs_remove_recursive(pvr_debugfs_dir);
1174 #ifdef CONFIG_PVR_DEBUG
1175 if (!debugfs_create_file("hwrec_mem", S_IRUSR, pvr_debugfs_dir, NULL,
1177 debugfs_remove_recursive(pvr_debugfs_dir);
1180 #endif /* CONFIG_PVR_DEBUG */
1182 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1183 if (!debugfs_create_file("hwrec_edm", S_IRUSR, pvr_debugfs_dir, NULL,
1185 debugfs_remove_recursive(pvr_debugfs_dir);
1190 if (!debugfs_create_file("hwrec_status", S_IRUSR, pvr_debugfs_dir, NULL,
1191 &hwrec_status_fops)) {
1192 debugfs_remove_recursive(pvr_debugfs_dir);
/*
 * Tear down the debugfs tree and free every dump buffer still held
 * (register page, memory dump, EDM snapshot, status pages).
 */
1199 void pvr_debugfs_cleanup(void)
1201 debugfs_remove_recursive(pvr_debugfs_dir);
1203 if (hwrec_registers)
/* NOTE(review): casting a pointer to u32 assumes a 32-bit kernel; this
 * would truncate on 64-bit -- presumably fine for the OMAP target, but
 * an unsigned long cast would be portable. */
1204 free_page((u32) hwrec_registers)
1206 #ifdef CONFIG_PVR_DEBUG
1208 #endif /* CONFIG_PVR_DEBUG */
1210 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1212 pvr_edm_buffer_destroy(hwrec_edm_buf);
1215 hwrec_status_free();