2 * Copyright (c) 2010-2011 Imre Deak <imre.deak@nokia.com>
3 * Copyright (c) 2010-2011 Luc Verhaegen <libv@codethink.co.uk>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 * Debugfs interface living in pvr/ subdirectory.
25 #include <linux/kernel.h>
26 #include <linux/debugfs.h>
27 #include <linux/vmalloc.h>
28 #include <linux/mutex.h>
29 #include <linux/uaccess.h>
32 #include "img_types.h"
33 #include "servicesext.h"
35 #include "sgxinfokm.h"
36 #include "syscommon.h"
37 #include "pvr_bridge_km.h"
39 #include "pvr_debugfs.h"
41 #include "bridged_support.h"
43 #include "pvr_trace_cmd.h"
/* Root of our debugfs tree: the pvr/ directory; created in pvr_debugfs_init(). */
45 struct dentry *pvr_debugfs_dir;
/*
 * get_sgx_node() - find the SGX device node on the services device list.
 * Walks sysdata->psDeviceNodeList for a node of type PVRSRV_DEVICE_TYPE_SGX.
 * NOTE(review): the return statements are elided in this view; presumably
 * returns the matching node, or NULL when SysAcquireData() fails or no SGX
 * node exists -- confirm against the full file.
 */
51 static struct PVRSRV_DEVICE_NODE *get_sgx_node(void)
53 struct SYS_DATA *sysdata;
54 struct PVRSRV_DEVICE_NODE *node;
/* Cannot search without the global services data. */
56 if (SysAcquireData(&sysdata) != PVRSRV_OK)
59 for (node = sysdata->psDeviceNodeList; node; node = node->psNext)
60 if (node->sDevId.eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
/*
 * pvr_debugfs_reset() - debugfs write handler that forces an SGX HW recovery.
 * Powers the device up to D0, runs HWRecoveryResetSGX(), then lets the
 * active-power-event logic decide whether to power it back down.
 */
66 static int pvr_debugfs_reset(void *data, u64 val)
68 struct PVRSRV_DEVICE_NODE *node;
69 enum PVRSRV_ERROR err;
/* Nothing to reset while the driver is administratively disabled. */
77 if (pvr_is_disabled()) {
82 node = get_sgx_node();
/* SGX registers may only be touched in D0 (see regs dump comment below). */
88 err = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
89 PVRSRV_POWER_STATE_D0);
90 if (err != PVRSRV_OK) {
/* Perform the reset, then give APM a chance to power SGX off again. */
95 HWRecoveryResetSGX(node, __func__);
97 SGXTestActivePowerEvent(node);
/*
 * pvr_debugfs_reset_wrapper() - dispatch a debugfs write to the handler
 * matching the variable the file was registered with (here: pvr_reset).
 */
104 static int pvr_debugfs_reset_wrapper(void *data, u64 val)
108 if (var == &pvr_reset)
109 return pvr_debugfs_reset(data, val);
/* Write-only attribute: NULL read callback; a write triggers the reset. */
114 DEFINE_SIMPLE_ATTRIBUTE(pvr_debugfs_reset_fops, NULL,
115 pvr_debugfs_reset_wrapper, "%llu\n");
117 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Geometry of the microkernel (EDM) status trace: record count and the
 * worst-case formatted length of one record line. */
121 #define SGXMK_TRACE_BUFFER_SIZE 512
122 #define SGXMK_TRACE_BUF_STR_LEN 80
/* Formatted snapshot of the EDM trace, held per open edm_trace file. */
124 struct edm_buf_info {
/*
 * edm_trace_print() - format the EDM status buffer into 'dst'; the
 * printk(KERN_DEBUG ...) lines suggest an alternate log-output path when
 * no destination buffer is given (branching elided in this view).
 * Returns the number of bytes produced, clamped to dst_len.
 */
130 edm_trace_print(struct PVRSRV_SGXDEV_INFO *sdev, char *dst, size_t dst_len)
/* No status buffer was allocated by the microkernel -- nothing to dump. */
139 if (!sdev->psKernelEDMStatusBufferMemInfo)
142 buf = sdev->psKernelEDMStatusBufferMemInfo->pvLinAddrKM;
/* First word is the last status code reported by the microkernel. */
145 p += scnprintf(dst + p, dst_len - p,
146 "Last SGX microkernel status code: 0x%x\n", *buf);
148 printk(KERN_DEBUG "Last SGX microkernel status code: 0x%x\n",
/* Each trace record occupies four 32-bit words. */
155 buf_end = buf + SGXMK_TRACE_BUFFER_SIZE * 4;
159 /* Dump the status values */
160 for (i = 0; i < SGXMK_TRACE_BUFFER_SIZE; i++) {
/* Words printed in 2,3,1,0 order -- matches the record layout used by
 * the microkernel trace writer (presumably; confirm in sgx headers). */
162 p += scnprintf(dst + p, dst_len - p,
163 "%3d %08X %08X %08X %08X\n",
164 i, buf[2], buf[3], buf[1], buf[0]);
166 printk(KERN_DEBUG "%3d %08X %08X %08X %08X\n",
167 i, buf[2], buf[3], buf[1], buf[0]);
173 return p > dst_len ? dst_len : p;
/*
 * pvr_edm_buffer_create() - vmalloc a snapshot buffer and fill it with the
 * formatted EDM trace. Returns the new edm_buf_info, or NULL on allocation
 * failure (error return elided in this view).
 */
176 static struct edm_buf_info *
177 pvr_edm_buffer_create(struct PVRSRV_SGXDEV_INFO *sgx_info)
179 struct edm_buf_info *bi;
182 /* Take a snapshot of the EDM trace buffer */
183 size = SGXMK_TRACE_BUFFER_SIZE * SGXMK_TRACE_BUF_STR_LEN;
184 bi = vmalloc(sizeof(*bi) + size);
186 pr_err("%s: vmalloc failed!\n", __func__);
190 bi->len = edm_trace_print(sgx_info, bi->data, size);
/* Free a snapshot made by pvr_edm_buffer_create(). */
196 pvr_edm_buffer_destroy(struct edm_buf_info *edm)
/* open(): take a private snapshot of the trace for this reader. */
201 static int pvr_debugfs_edm_open(struct inode *inode, struct file *file)
203 struct PVRSRV_DEVICE_NODE *node;
205 node = get_sgx_node();
207 file->private_data = pvr_edm_buffer_create(node->pvDevice);
208 if (!file->private_data)
/* release(): drop the per-open snapshot. */
214 static int pvr_debugfs_edm_release(struct inode *inode, struct file *file)
216 pvr_edm_buffer_destroy(file->private_data);
/* read(): serve the snapshot captured at open time. */
221 static ssize_t pvr_debugfs_edm_read(struct file *file, char __user *buffer,
222 size_t count, loff_t *ppos)
224 struct edm_buf_info *bi = file->private_data;
226 return simple_read_from_buffer(buffer, count, ppos, bi->data, bi->len);
229 static const struct file_operations pvr_debugfs_edm_fops = {
230 .owner = THIS_MODULE,
231 .open = pvr_debugfs_edm_open,
232 .read = pvr_debugfs_edm_read,
233 .release = pvr_debugfs_edm_release,
235 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
237 #ifdef CONFIG_PVR_TRACE_CMD
/* Command-trace snapshot state; shared across openers, refcounted by
 * trcmd_open_cnt (increment/decrement paths elided in this view). */
239 static void *trcmd_str_buf;
240 static u8 *trcmd_snapshot;
241 static size_t trcmd_snapshot_size;
242 static int trcmd_open_cnt;
/*
 * open(): allocate a one-page formatting buffer and snapshot the command
 * trace; the buffer is freed again if the snapshot cannot be created.
 */
244 static int pvr_dbg_trcmd_open(struct inode *inode, struct file *file)
253 trcmd_str_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
254 if (!trcmd_str_buf) {
262 r = pvr_trcmd_create_snapshot(&trcmd_snapshot, &trcmd_snapshot_size);
/* Snapshot failed: undo the string-buffer allocation. */
265 kfree(trcmd_str_buf);
/* release(): drop the snapshot and the formatting buffer. */
276 static int pvr_dbg_trcmd_release(struct inode *inode, struct file *file)
278 pvr_trcmd_destroy_snapshot(trcmd_snapshot);
279 kfree(trcmd_str_buf);
/*
 * read(): format a chunk of the snapshot into trcmd_str_buf, then copy it
 * to userspace.
 * NOTE(review): max_t(size_t, PAGE_SIZE, count) passes a size LARGER than
 * the PAGE_SIZE allocation of trcmd_str_buf whenever count > PAGE_SIZE;
 * this looks like it should be min_t -- verify against pvr_trcmd_print()'s
 * contract before relying on large reads.
 */
285 static ssize_t pvr_dbg_trcmd_read(struct file *file, char __user *buffer,
286 size_t count, loff_t *ppos)
290 ret = pvr_trcmd_print(trcmd_str_buf, max_t(size_t, PAGE_SIZE, count),
291 trcmd_snapshot, trcmd_snapshot_size, ppos);
292 if (copy_to_user(buffer, trcmd_str_buf, ret))
298 static const struct file_operations pvr_dbg_trcmd_fops = {
299 .owner = THIS_MODULE,
300 .open = pvr_dbg_trcmd_open,
301 .release = pvr_dbg_trcmd_release,
302 .read = pvr_dbg_trcmd_read,
/*
 * pvr_debugfs_llseek_helper() - common llseek for fixed-size dump files.
 * Validates the new position against [0, max] for each whence mode and
 * returns the resulting file position (whence dispatch and error returns
 * are elided in this view).
 */
310 pvr_debugfs_llseek_helper(struct file *filp, loff_t offset, int whence,
/* SEEK_SET: absolute offset must lie inside the dump. */
317 if ((offset > max) || (offset < 0))
/* SEEK_CUR: the relative move must also stay inside the dump. */
323 if (((filp->f_pos + offset) > max) ||
324 ((filp->f_pos + offset) < 0))
327 f_pos = filp->f_pos + offset;
/* SEEK_END: offset is applied relative to 'max' (expected <= 0). */
334 f_pos = max + offset;
348 * One shot register dump.
350 * Only in D0 can we read all registers. Our driver currently only does either
351 * D0 or D3. In D3 any register read results in a SIGBUS. There is a possibility
352 * that in D1 or possibly D2 all registers apart from [0xA08:0xA4C] can be read.
/*
 * open(): power SGX up, copy the first 1024 registers (4KB) into a free
 * page stashed in filp->private_data, then let APM power it back down.
 */
355 pvr_debugfs_regs_open(struct inode *inode, struct file *filp)
357 struct PVRSRV_DEVICE_NODE *node;
358 struct PVRSRV_SGXDEV_INFO *dev;
359 enum PVRSRV_ERROR error;
363 regs = (u32 *) __get_free_page(GFP_KERNEL);
369 if (pvr_is_disabled()) {
374 node = get_sgx_node();
379 dev = node->pvDevice;
/* Registers may only be read in D0 (see the comment above). */
381 error = PVRSRVSetDevicePowerStateKM(node->sDevId.ui32DeviceIndex,
382 PVRSRV_POWER_STATE_D0);
383 if (error != PVRSRV_OK) {
/* Snapshot registers 0x000..0xFFC into the page. */
388 for (i = 0; i < 1024; i++)
389 regs[i] = readl(dev->pvRegsBaseKM + 4 * i);
391 filp->private_data = regs;
/* Allow APM to power SGX off again now that we're done. */
393 SGXTestActivePowerEvent(node);
/* release(): free the register snapshot page. */
402 pvr_debugfs_regs_release(struct inode *inode, struct file *filp)
404 free_page((unsigned long) filp->private_data);
/* Each dump line is "0xRRR 0xVVVVVVVV\n" -- 17 bytes including newline. */
409 #define REGS_DUMP_LINE_SIZE 17
410 #define REGS_DUMP_FORMAT "0x%03X 0x%08X\n"
413 pvr_debugfs_regs_llseek(struct file *filp, loff_t offset, int whence)
415 return pvr_debugfs_llseek_helper(filp, offset, whence,
416 1024 * REGS_DUMP_LINE_SIZE);
/*
 * read(): format exactly one register line per call, addressed by *f_pos
 * in units of REGS_DUMP_LINE_SIZE, and copy the requested tail of that
 * line to userspace. Callers must request at least one full line.
 */
420 pvr_debugfs_regs_read(struct file *filp, char __user *buf, size_t size,
423 char tmp[REGS_DUMP_LINE_SIZE + 1];
424 u32 *regs = filp->private_data;
427 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
/* Which register line does *f_pos fall into? */
430 i = ((int) *f_pos) / (sizeof(tmp) - 1);
434 size = snprintf(tmp, sizeof(tmp), REGS_DUMP_FORMAT, i * 4, regs[i]);
/* Copy starting at the in-line offset implied by *f_pos. */
437 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
447 static const struct file_operations pvr_debugfs_regs_fops = {
448 .owner = THIS_MODULE,
449 .llseek = pvr_debugfs_regs_llseek,
450 .read = pvr_debugfs_regs_read,
451 .open = pvr_debugfs_regs_open,
452 .release = pvr_debugfs_regs_release,
458 * HW Recovery dumping support.
/* Serializes all hwrec state below: dump generation vs. debugfs readers. */
461 static struct mutex hwrec_mutex[1];
/* Timestamp of the most recent HW recovery dump (served via hwrec_time). */
462 static struct timeval hwrec_time;
/* Count of open hwrec files; a new dump refuses to overwrite while > 0. */
463 static int hwrec_open_count;
/* Wakes hwrec_event readers when a new dump is generated. */
464 static DECLARE_WAIT_QUEUE_HEAD(hwrec_wait_queue);
465 static int hwrec_event;
467 /* add extra locking to keep us from overwriting things during dumping. */
468 static int hwrec_event_open_count;
469 static int hwrec_event_file_lock;
471 /* While these could get moved into PVRSRV_SGXDEV_INFO, the more future-proof
472 * way of handling hw recovery events is by providing 1 single hwrecovery dump
473 * at a time, and adding a hwrec_info debugfs file with: process information,
474 * general driver information, and the instance of the (then multicore) pvr
475 * where the hwrec event happened.
/* Register snapshot captured at recovery time (one free page, 1024 u32s). */
477 static u32 *hwrec_registers;
479 #ifdef CONFIG_PVR_DEBUG
480 static size_t hwrec_mem_size;
/* NOTE(review): HWREC_MEM_PAGES is a byte quantity (4 * PAGE_SIZE) used as
 * the ELEMENT COUNT of hwrec_mem_pages[], giving room for 4*PAGE_SIZE pages
 * of dump data. Generous head-room or a naming slip? Verify intent. */
481 #define HWREC_MEM_PAGES (4 * PAGE_SIZE)
482 static unsigned long hwrec_mem_pages[HWREC_MEM_PAGES];
483 #endif /* CONFIG_PVR_DEBUG */
485 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* EDM trace snapshot captured at the moment of HW recovery. */
486 static struct edm_buf_info *hwrec_edm_buf;
/*
 * hwrec_registers_dump() - snapshot all 1024 SGX registers into a lazily
 * allocated free page, for later reading via the hwrec_regs file.
 */
490 hwrec_registers_dump(struct PVRSRV_SGXDEV_INFO *psDevInfo)
494 if (!hwrec_registers) {
495 hwrec_registers = (u32 *) __get_free_page(GFP_KERNEL);
496 if (!hwrec_registers) {
497 pr_err("%s: failed to get free page.\n", __func__);
502 for (i = 0; i < 1024; i++)
503 hwrec_registers[i] = readl(psDevInfo->pvRegsBaseKM + 4 * i);
/* Free every page used by a hwrec_pages_write() dump and reset *size. */
507 hwrec_pages_free(size_t *size, unsigned long *pages)
514 for (i = 0; (i * PAGE_SIZE) < *size; i++) {
/*
 * hwrec_pages_write() - append 'size' bytes to a grow-on-demand list of
 * free pages, allocating each page on first touch; *current_size tracks
 * the running total. Returns the number of bytes consumed (return path
 * elided in this view).
 */
523 hwrec_pages_write(u8 *buffer, size_t size, size_t *current_size,
524 unsigned long *pages, int array_size)
530 size_t offset = *current_size & ~PAGE_MASK;
531 int page = *current_size / PAGE_SIZE;
/* Refuse to write past the end of the page-pointer array. */
534 if (((*current_size) / PAGE_SIZE) >= array_size) {
535 pr_err("%s: Size overrun!\n", __func__);
539 pages[page] = __get_free_page(GFP_KERNEL);
541 pr_err("%s: failed to get free page.\n",
/* Clamp this copy to the remainder of the current page. */
547 if (count > (PAGE_SIZE - offset))
548 count = PAGE_SIZE - offset;
550 memcpy(((u8 *) pages[page]) + offset, buffer, count);
555 *current_size += count;
561 #ifdef CONFIG_PVR_DEBUG
/* Free the context-memory dump pages. */
565 hwrec_pages_free(&hwrec_mem_size, hwrec_mem_pages);
/* Append raw bytes to the context-memory dump. */
569 hwrec_mem_write(u8 *buffer, size_t size)
571 return hwrec_pages_write(buffer, size, &hwrec_mem_size,
572 hwrec_mem_pages, ARRAY_SIZE(hwrec_mem_pages));
/* printf-style append to the context-memory dump (via a temp buffer). */
576 hwrec_mem_print(char *format, ...)
582 va_start(ap, format);
583 size = vscnprintf(tmp, sizeof(tmp), format, ap);
586 return hwrec_mem_write(tmp, size);
588 #endif /* CONFIG_PVR_DEBUG */
591 * Render status buffer dumping.
/* Running size and page list for the render-status dump. */
593 static size_t hwrec_status_size;
594 static unsigned long hwrec_status_pages[1024];
/* Append raw bytes to the render-status dump. */
597 hwrec_status_write(char *buffer, size_t size)
599 return hwrec_pages_write(buffer, size, &hwrec_status_size,
601 ARRAY_SIZE(hwrec_status_pages));
605 hwrec_status_free(void)
607 hwrec_pages_free(&hwrec_status_size, hwrec_status_pages);
/* printf-style append to the render-status dump. */
611 hwrec_status_print(char *format, ...)
617 va_start(ap, format);
618 size = vscnprintf(tmp, sizeof(tmp), format, ap);
621 return hwrec_status_write(tmp, size);
/* Flags a buffer whose descriptors in the TA and 3D lists disagreed. */
624 #define BUF_DESC_CORRUPT (1 << 31)
/*
 * add_uniq_items() - merge 'src' into 'dst', skipping buf_ids already
 * present; a duplicate id whose descriptor contents differ is marked
 * BUF_DESC_CORRUPT rather than added twice.
 */
626 static void add_uniq_items(struct render_state_buf_list *dst,
627 const struct render_state_buf_list *src)
631 for (i = 0; i < src->cnt; i++) {
632 const struct render_state_buf_info *sbinf = &src->info[i];
635 for (j = 0; j < dst->cnt; j++) {
636 if (sbinf->buf_id == dst->info[j].buf_id) {
637 if (memcmp(sbinf, &dst->info[j],
639 dst->info[j].type |= BUF_DESC_CORRUPT;
644 /* Bound for cnt is guaranteed by the caller */
645 dst->info[dst->cnt] = *sbinf;
/*
 * create_merged_uniq_list() - kmalloc a list big enough for the union of
 * all input lists and merge each via add_uniq_items(). Returns NULL on
 * allocation failure (error paths elided in this view). Caller frees.
 */
651 static struct render_state_buf_list *create_merged_uniq_list(
652 struct render_state_buf_list **bl_set, int set_size)
655 struct render_state_buf_list *dbl;
659 * Create a buf list big enough to contain all elements from each
/* Header size plus worst-case element storage across all lists. */
662 size = offsetof(struct render_state_buf_list, info[0]);
663 for (i = 0; i < set_size; i++) {
666 size += bl_set[i]->cnt * sizeof(bl_set[i]->info[0]);
670 dbl = kmalloc(size, GFP_KERNEL);
675 for (i = 0; i < set_size; i++) {
677 add_uniq_items(dbl, bl_set[i]);
/*
 * vmap_buf() - map a handle-identified buffer into kernel virtual space.
 * Uses the mem-info's existing kernel linear address when present;
 * otherwise resolves the underlying PFNs and vmap()s the pages.
 * Pair with vunmap_buf(). Returns NULL on failure (elided in this view).
 */
683 static void *vmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
684 u32 handle, off_t offset, size_t size)
686 struct PVRSRV_KERNEL_MEM_INFO *minfo;
687 struct LinuxMemArea *mem_area;
688 enum PVRSRV_ERROR err;
/* NOTE(review): this rejects offsets with bits in PAGE_MASK (i.e. >= one
 * page), yet start_ofs below is computed as offset & PAGE_MASK -- one of
 * the two looks inverted; verify against the full function. */
696 if (offset & PAGE_MASK)
699 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
700 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
701 if (err != PVRSRV_OK)
/* Fast path: buffer is already mapped into the kernel. */
703 if (minfo->pvLinAddrKM)
704 return minfo->pvLinAddrKM;
706 err = PVRSRVLookupOSMemHandle(proc->psHandleBase, (void *)&mem_area,
708 if (err != PVRSRV_OK)
/* Page-align the range and collect one struct page per covered page. */
711 start_ofs = offset & PAGE_MASK;
712 end_ofs = PAGE_ALIGN(offset + size);
713 pg_cnt = (end_ofs - start_ofs) >> PAGE_SHIFT;
714 pages = kmalloc(pg_cnt * sizeof(pages[0]), GFP_KERNEL);
717 for (i = 0; i < pg_cnt; i++) {
720 pfn = LinuxMemAreaToCpuPFN(mem_area, start_ofs);
723 pages[i] = pfn_to_page(pfn);
724 start_ofs += PAGE_SIZE;
726 map = vmap(pages, pg_cnt, VM_MAP, PAGE_KERNEL);
/*
 * vunmap_buf() - undo vmap_buf(); a no-op for buffers that were served
 * straight from their existing kernel linear address.
 */
734 static void vunmap_buf(struct PVRSRV_PER_PROCESS_DATA *proc,
735 u32 handle, void *map)
737 struct PVRSRV_KERNEL_MEM_INFO *minfo;
738 enum PVRSRV_ERROR err;
740 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&minfo,
741 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
742 if (err != PVRSRV_OK)
744 if (minfo->pvLinAddrKM)
/* vmap() returned a page-aligned address; mask off any in-page offset. */
746 vunmap((void *)(((unsigned long)map) & PAGE_MASK));
/*
 * dump_buf() - emit one buffer into the render-status dump, prefixed by a
 * "<type ... size ...>" header; corrupt buffers carry a marker via 'corr'.
 */
749 static void dump_buf(void *start, size_t size, u32 type)
753 if (type & BUF_DESC_CORRUPT) {
754 type &= ~BUF_DESC_CORRUPT;
757 hwrec_status_print("<type %d%s size %d>\n", type, corr, size);
758 hwrec_status_write(start, size);
/*
 * get_state_buf_list() - locate and sanity-check a render_state_buf_list
 * embedded at 'offset' inside the allocation behind 'handle'.
 * Returns the in-place list pointer, or NULL on any validation failure
 * (failure returns elided in this view).
 */
761 static struct render_state_buf_list *get_state_buf_list(
762 struct PVRSRV_PER_PROCESS_DATA *proc,
763 u32 handle, off_t offset)
765 struct PVRSRV_KERNEL_MEM_INFO *container;
766 struct render_state_buf_list *buf;
767 enum PVRSRV_ERROR err;
769 err = PVRSRVLookupHandle(proc->psHandleBase, (void **)&container,
770 (void *)handle, PVRSRV_HANDLE_TYPE_MEM_INFO);
771 if (err != PVRSRV_OK)
773 if (!container->pvLinAddrKM)
/* The list header must fit inside the containing allocation. */
775 if (offset + sizeof(*buf) > container->ui32AllocSize)
778 buf = container->pvLinAddrKM + offset;
/* An out-of-range element count means the structure is untrustworthy. */
780 if (buf->cnt > ARRAY_SIZE(buf->info))
/* Map and dump every buffer referenced by the (merged) list. */
786 static void dump_state_buf_list(struct PVRSRV_PER_PROCESS_DATA *proc,
787 struct render_state_buf_list *bl)
794 pr_info("Dumping %d render state buffers\n", bl->cnt);
795 for (i = 0; i < bl->cnt; i++) {
796 struct render_state_buf_info *binfo;
799 binfo = &bl->info[i];
801 map = vmap_buf(proc, binfo->buf_id, binfo->offset, binfo->size);
804 dump_buf(map, binfo->size, binfo->type);
806 vunmap_buf(proc, binfo->buf_id, map);
/*
 * dump_sgx_state_bufs() - read the TA and 3D render-state buffer handles
 * from the SGX host control block, merge the two lists (deduplicated)
 * and dump the union into the render-status dump.
 */
810 static void dump_sgx_state_bufs(struct PVRSRV_PER_PROCESS_DATA *proc,
811 struct PVRSRV_SGXDEV_INFO *dev_info)
813 struct SGXMKIF_HOST_CTL __iomem *hctl = dev_info->psSGXHostCtl;
814 struct render_state_buf_list *bl_set[2] = { NULL };
815 struct render_state_buf_list *mbl;
822 handle_ta = readl(&hctl->render_state_buf_ta_handle);
823 handle_3d = readl(&hctl->render_state_buf_3d_handle);
824 bl_set[0] = get_state_buf_list(proc, handle_ta,
825 dev_info->state_buf_ofs);
827 * The two buf list can be the same if the TA and 3D phases used the
828 * same context at the time of the HWrec. In this case just ignore
831 if (handle_ta != handle_3d)
832 bl_set[1] = get_state_buf_list(proc, handle_3d,
833 dev_info->state_buf_ofs);
834 mbl = create_merged_uniq_list(bl_set, ARRAY_SIZE(bl_set));
838 dump_state_buf_list(proc, mbl);
/*
 * pvr_hwrec_dump() - called on HW recovery to capture registers, context
 * memory, the EDM trace and the render-state buffers, then wake any
 * hwrec_event readers. Refuses to overwrite a dump that userspace still
 * holds open (hwrec_open_count / hwrec_event_file_lock).
 */
843 pvr_hwrec_dump(struct PVRSRV_PER_PROCESS_DATA *proc_data,
844 struct PVRSRV_SGXDEV_INFO *psDevInfo)
846 mutex_lock(hwrec_mutex)
848 if (hwrec_open_count || hwrec_event_file_lock) {
849 pr_err("%s: previous hwrec dump is still locked!\n", __func__);
850 mutex_unlock(hwrec_mutex);
/* The timestamp uniquely identifies this dump to userspace. */
854 do_gettimeofday(&hwrec_time);
855 pr_info("HW Recovery dump generated at %010ld%06ld\n",
856 hwrec_time.tv_sec, hwrec_time.tv_usec);
858 hwrec_registers_dump(psDevInfo);
860 #ifdef CONFIG_PVR_DEBUG
862 mmu_hwrec_mem_dump(psDevInfo);
863 #endif /* CONFIG_PVR_DEBUG */
865 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Replace any previous EDM snapshot with a fresh one. */
867 pvr_edm_buffer_destroy(hwrec_edm_buf);
868 hwrec_edm_buf = pvr_edm_buffer_create(psDevInfo);
872 dump_sgx_state_bufs(proc_data, psDevInfo);
876 mutex_unlock(hwrec_mutex);
/* Unblock any reader sleeping in hwrec_event_read(). */
878 wake_up_interruptible(&hwrec_wait_queue);
/* Shared open/release for the hwrec files: presumably maintain
 * hwrec_open_count so a new dump cannot overwrite data while a reader
 * holds a file open (count updates elided in this view -- confirm). */
885 hwrec_file_open(struct inode *inode, struct file *filp)
887 mutex_lock(hwrec_mutex);
891 mutex_unlock(hwrec_mutex);
896 hwrec_file_release(struct inode *inode, struct file *filp)
898 mutex_lock(hwrec_mutex);
902 mutex_unlock(hwrec_mutex);
907 * Provides a hwrec timestamp for unique dumping.
/* read(): format the dump timestamp under the lock, then serve it. */
910 hwrec_time_read(struct file *filp, char __user *buf, size_t size,
915 mutex_lock(hwrec_mutex);
916 snprintf(tmp, sizeof(tmp), "%010ld%06ld",
917 hwrec_time.tv_sec, hwrec_time.tv_usec);
918 mutex_unlock(hwrec_mutex);
920 return simple_read_from_buffer(buf, size, f_pos, tmp, strlen(tmp));
923 static const struct file_operations hwrec_time_fops = {
924 .owner = THIS_MODULE,
926 .read = hwrec_time_read,
927 .open = hwrec_file_open,
928 .release = hwrec_file_release,
932 * Blocks the reader until a HWRec happens.
/* open(): only a single hwrec_event reader is permitted at a time. */
935 hwrec_event_open(struct inode *inode, struct file *filp)
939 mutex_lock(hwrec_mutex);
941 if (hwrec_event_open_count)
944 hwrec_event_open_count++;
948 mutex_unlock(hwrec_mutex);
954 hwrec_event_release(struct inode *inode, struct file *filp)
956 mutex_lock(hwrec_mutex);
958 hwrec_event_open_count--;
960 mutex_unlock(hwrec_mutex);
/*
 * read(): drop the dump lock, sleep until the next recovery event, then
 * re-take the lock so the new dump stays stable while it is read out.
 */
967 hwrec_event_read(struct file *filp, char __user *buf, size_t size,
972 mutex_lock(hwrec_mutex);
974 hwrec_event_file_lock = 0;
976 mutex_unlock(hwrec_mutex);
/* Sleeps until pvr_hwrec_dump() sets hwrec_event and wakes the queue. */
978 ret = wait_event_interruptible(hwrec_wait_queue, hwrec_event);
980 mutex_lock(hwrec_mutex);
983 hwrec_event_file_lock = 1;
985 mutex_unlock(hwrec_mutex);
991 static const struct file_operations hwrec_event_fops = {
992 .owner = THIS_MODULE,
994 .read = hwrec_event_read,
995 .open = hwrec_event_open,
996 .release = hwrec_event_release,
1000 * Reads out all readable registers.
/* llseek over the captured register dump, when one exists. */
1003 hwrec_regs_llseek(struct file *filp, loff_t offset, int whence)
1007 mutex_lock(hwrec_mutex);
1009 if (hwrec_registers)
1010 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1011 1024 * REGS_DUMP_LINE_SIZE);
1015 mutex_unlock(hwrec_mutex);
/*
 * read(): same one-line-per-call formatting as pvr_debugfs_regs_read(),
 * but sourced from the hwrec register snapshot, under hwrec_mutex.
 */
1021 hwrec_regs_read(struct file *filp, char __user *buf, size_t size,
1024 char tmp[REGS_DUMP_LINE_SIZE + 1];
1027 if ((*f_pos < 0) || (size < (sizeof(tmp) - 1)))
/* Which register line does *f_pos fall into? */
1030 i = ((int) *f_pos) / (sizeof(tmp) - 1);
1034 mutex_lock(hwrec_mutex);
/* No dump was captured yet -- nothing to read. */
1036 if (!hwrec_registers)
1039 size = snprintf(tmp, sizeof(tmp), REGS_DUMP_FORMAT, i * 4,
1040 hwrec_registers[i]);
1042 mutex_unlock(hwrec_mutex);
1045 if (copy_to_user(buf, tmp + *f_pos - (i * (sizeof(tmp) - 1)),
1055 static const struct file_operations hwrec_regs_fops = {
1056 .owner = THIS_MODULE,
1057 .llseek = hwrec_regs_llseek,
1058 .read = hwrec_regs_read,
1059 .open = hwrec_file_open,
1060 .release = hwrec_file_release,
1063 #ifdef CONFIG_PVR_DEBUG
1065 * Provides a full context dump: page directory, page tables, and all mapped
/* llseek over the captured context-memory dump. */
1069 hwrec_mem_llseek(struct file *filp, loff_t offset, int whence)
1073 mutex_lock(hwrec_mutex);
1076 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1081 mutex_unlock(hwrec_mutex);
/*
 * read(): copy out of the page list one page-fragment at a time; the
 * request is clamped to the dump size and to the current page boundary,
 * so userspace may need several reads per page.
 */
1087 hwrec_mem_read(struct file *filp, char __user *buf, size_t size,
1090 mutex_lock(hwrec_mutex);
1092 if ((*f_pos >= 0) && (*f_pos < hwrec_mem_size)) {
1095 size = min(size, (size_t) hwrec_mem_size - (size_t) *f_pos);
/* Locate the page and in-page offset for *f_pos. */
1097 page = (*f_pos) / PAGE_SIZE;
1098 offset = (*f_pos) & ~PAGE_MASK;
1100 size = min(size, (size_t) PAGE_SIZE - offset);
1102 if (copy_to_user(buf,
1103 ((u8 *) hwrec_mem_pages[page]) + offset,
1105 mutex_unlock(hwrec_mutex);
1111 mutex_unlock(hwrec_mutex);
1117 static const struct file_operations hwrec_mem_fops = {
1118 .owner = THIS_MODULE,
1119 .llseek = hwrec_mem_llseek,
1120 .read = hwrec_mem_read,
1121 .open = hwrec_file_open,
1122 .release = hwrec_file_release,
1124 #endif /* CONFIG_PVR_DEBUG */
1127 * Read out edm trace created before HW recovery reset.
1129 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* llseek over the hwrec EDM snapshot, bounded by its captured length. */
1131 hwrec_edm_llseek(struct file *filp, loff_t offset, int whence)
1135 mutex_lock(hwrec_mutex);
1138 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1139 hwrec_edm_buf->len);
1143 mutex_unlock(hwrec_mutex);
/* read(): serve the EDM snapshot captured by pvr_hwrec_dump(). */
1149 hwrec_edm_read(struct file *filp, char __user *buf, size_t size,
1154 mutex_lock(hwrec_mutex);
1157 ret = simple_read_from_buffer(buf, size, f_pos,
1158 hwrec_edm_buf->data,
1159 hwrec_edm_buf->len);
1163 mutex_unlock(hwrec_mutex);
1168 static const struct file_operations hwrec_edm_fops = {
1169 .owner = THIS_MODULE,
1170 .llseek = hwrec_edm_llseek,
1171 .read = hwrec_edm_read,
1172 .open = hwrec_file_open,
1173 .release = hwrec_file_release,
1175 #endif /* PVRSRV_USSE_EDM_STATUS_DEBUG */
1178 * Provides a dump of the TA and 3D status buffers.
/* llseek over the render-status dump, if one was captured. */
1181 hwrec_status_llseek(struct file *filp, loff_t offset, int whence)
1185 mutex_lock(hwrec_mutex);
1187 if (hwrec_status_size)
1188 f_pos = pvr_debugfs_llseek_helper(filp, offset, whence,
1193 mutex_unlock(hwrec_mutex);
/* read(): page-fragment copy-out, mirroring hwrec_mem_read() above. */
1199 hwrec_status_read(struct file *filp, char __user *buf, size_t size,
1202 mutex_lock(hwrec_mutex);
1204 if ((*f_pos >= 0) && (*f_pos < hwrec_status_size)) {
1207 size = min(size, (size_t) hwrec_status_size - (size_t) *f_pos);
/* Locate the page and in-page offset for *f_pos. */
1209 page = (*f_pos) / PAGE_SIZE;
1210 offset = (*f_pos) & ~PAGE_MASK;
1212 size = min(size, (size_t) PAGE_SIZE - offset);
1214 if (copy_to_user(buf,
1215 ((u8 *) hwrec_status_pages[page]) + offset,
1217 mutex_unlock(hwrec_mutex);
1223 mutex_unlock(hwrec_mutex);
1229 static const struct file_operations hwrec_status_fops = {
1230 .owner = THIS_MODULE,
1231 .llseek = hwrec_status_llseek,
1232 .read = hwrec_status_read,
1233 .open = hwrec_file_open,
1234 .release = hwrec_file_release,
/*
 * pvr_debugfs_init() - create the pvr/ debugfs directory and every entry.
 * Returns 0 on success (error returns elided in this view).
 * NOTE(review): some failure paths call debugfs_remove() on a directory
 * that may already contain entries, while later ones use
 * debugfs_remove_recursive(); the recursive variant looks correct
 * everywhere -- verify and unify.
 */
1240 int pvr_debugfs_init(void)
1242 mutex_init(hwrec_mutex);
1244 pvr_debugfs_dir = debugfs_create_dir("pvr", NULL);
1245 if (!pvr_debugfs_dir)
/* reset_sgx: write-only trigger, dispatched via pvr_reset. */
1248 if (!debugfs_create_file("reset_sgx", S_IWUSR, pvr_debugfs_dir,
1249 &pvr_reset, &pvr_debugfs_reset_fops)) {
1250 debugfs_remove(pvr_debugfs_dir);
1254 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1255 if (!debugfs_create_file("edm_trace", S_IRUGO, pvr_debugfs_dir, NULL,
1256 &pvr_debugfs_edm_fops)) {
1257 debugfs_remove_recursive(pvr_debugfs_dir);
1261 #ifdef CONFIG_PVR_TRACE_CMD
1262 if (!debugfs_create_file("command_trace", S_IRUGO, pvr_debugfs_dir,
1263 NULL, &pvr_dbg_trcmd_fops)) {
1264 debugfs_remove_recursive(pvr_debugfs_dir);
1269 if (!debugfs_create_file("registers", S_IRUSR, pvr_debugfs_dir, NULL,
1270 &pvr_debugfs_regs_fops)) {
1271 debugfs_remove(pvr_debugfs_dir);
1275 if (!debugfs_create_file("hwrec_event", S_IRUSR, pvr_debugfs_dir, NULL,
1276 &hwrec_event_fops)) {
1277 debugfs_remove_recursive(pvr_debugfs_dir);
1281 if (!debugfs_create_file("hwrec_time", S_IRUSR, pvr_debugfs_dir, NULL,
1282 &hwrec_time_fops)) {
1283 debugfs_remove_recursive(pvr_debugfs_dir);
1287 if (!debugfs_create_file("hwrec_regs", S_IRUSR, pvr_debugfs_dir, NULL,
1288 &hwrec_regs_fops)) {
1289 debugfs_remove_recursive(pvr_debugfs_dir);
1293 #ifdef CONFIG_PVR_DEBUG
1294 if (!debugfs_create_file("hwrec_mem", S_IRUSR, pvr_debugfs_dir, NULL,
1296 debugfs_remove_recursive(pvr_debugfs_dir);
1299 #endif /* CONFIG_PVR_DEBUG */
1301 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
1302 if (!debugfs_create_file("hwrec_edm", S_IRUSR, pvr_debugfs_dir, NULL,
1304 debugfs_remove_recursive(pvr_debugfs_dir);
1309 if (!debugfs_create_file("hwrec_status", S_IRUSR, pvr_debugfs_dir, NULL,
1310 &hwrec_status_fops)) {
1311 debugfs_remove_recursive(pvr_debugfs_dir);
/*
 * pvr_debugfs_cleanup() - remove the whole debugfs tree and free all dump
 * state captured by HW recovery.
 */
1318 void pvr_debugfs_cleanup(void)
1320 debugfs_remove_recursive(pvr_debugfs_dir);
1322 if (hwrec_registers)
/* NOTE(review): casting a pointer to (u32) truncates on 64-bit kernels;
 * the regs-file release uses (unsigned long) -- verify this path is only
 * built for 32-bit, or align the cast. */
1323 free_page((u32) hwrec_registers);
1325 #ifdef CONFIG_PVR_DEBUG
1327 #endif /* CONFIG_PVR_DEBUG */
1329 #ifdef PVRSRV_USSE_EDM_STATUS_DEBUG
/* Drop the EDM snapshot captured at the last HW recovery, if any. */
1331 pvr_edm_buffer_destroy(hwrec_edm_buf);
1334 hwrec_status_free();