void *hDevCookieInt;
void *pvSyncInfo;
struct PVRSRV_SGXDEV_INFO *psDevInfo;
+ struct PVRSRV_FILE_PRIVATE_DATA *priv = filp->private_data;
PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID,
PVRSRV_BRIDGE_SGX_2DQUERYBLTSCOMPLETE);
if (psRetOUT->eError != PVRSRV_OK)
return 0;
+ if (ps2DQueryBltsCompleteIN->type == _PVR_SYNC_WAIT_FLIP) {
+ if (pvr_flip_event_req(priv,
+ (long)ps2DQueryBltsCompleteIN->
+ hKernSyncInfo,
+ ps2DQueryBltsCompleteIN->user_data))
+ psRetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+
+ return 0;
+ }
+
psRetOUT->eError =
PVRSRVLookupHandle(psPerProc->psHandleBase, &pvSyncInfo,
ps2DQueryBltsCompleteIN->hKernSyncInfo,
hDevCookieInt)->pvDevice;
if (ps2DQueryBltsCompleteIN->type == _PVR_SYNC_WAIT_EVENT) {
- struct PVRSRV_FILE_PRIVATE_DATA *priv = filp->private_data;
-
if (pvr_sync_event_req(priv,
(struct PVRSRV_KERNEL_SYNC_INFO *)pvSyncInfo,
ps2DQueryBltsCompleteIN->user_data))
static spinlock_t event_lock;
static struct list_head sync_wait_list;
+/* Flip events allocated but not yet consumed by userspace; guarded by
+ * event_lock, mirroring sync_wait_list above. */
+static struct list_head flip_wait_list;
static inline bool is_render_complete(const struct PVRSRV_SYNC_DATA *sync)
{
return 0;
}
+/*
+ * Complete a pending flip event: stamp it with the completion time, move
+ * it from its current list onto the owning file's event_list, and wake any
+ * reader blocked in pvr_read()/pvr_poll().
+ *
+ * NOTE(review): the list manipulation here is not locked locally — this
+ * appears to rely on the caller holding event_lock (as pvr_flip_event_req
+ * does); confirm every caller does the same.
+ */
+static void pvr_signal_flip_event(struct pvr_pending_flip_event *e,
+ const struct timeval *now)
+{
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ /* Hand the event over to the file's readable queue. */
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+}
+
+/*
+ * Queue a PVR_EVENT_FLIP event for this client and signal it immediately.
+ *
+ * priv:      per-file private data owning the event queue.
+ * overlay:   overlay identifier echoed back to userspace in the event.
+ * user_data: opaque cookie echoed back to userspace in the event.
+ *
+ * Returns 0 on success, -ENOMEM if the allocation fails or the client's
+ * event-space budget is exhausted. The allocated event is owned by the
+ * event machinery once queued; it is released via base.destroy.
+ */
+int pvr_flip_event_req(struct PVRSRV_FILE_PRIVATE_DATA *priv,
+ unsigned int overlay, u64 user_data)
+{
+ struct pvr_pending_flip_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ e->event.base.type = PVR_EVENT_FLIP;
+ e->event.base.length = sizeof(e->event);
+ e->base.event = &e->event.base;
+ e->event.overlay = overlay;
+ e->event.user_data = user_data;
+ e->base.file_priv = priv;
+ /* NOTE(review): calling kfree through a cast function-pointer type is
+ * undefined behavior in ISO C, although it is a long-standing kernel
+ * idiom; a one-line wrapper would be strictly conforming. */
+ e->base.destroy = (void (*)(struct pvr_pending_event *))kfree;
+
+ do_gettimeofday(&now);
+ spin_lock_irqsave(&event_lock, flags);
+
+ /* Enforce the per-client event budget; reject rather than let one
+ * client queue events without bound. */
+ if (priv->event_space < sizeof(e->event)) {
+ spin_unlock_irqrestore(&event_lock, flags);
+ kfree(e);
+ return -ENOMEM;
+ }
+
+ priv->event_space -= sizeof(e->event);
+
+ /* NOTE(review): the event is added to flip_wait_list and then
+ * immediately moved onto the file's event_list by
+ * pvr_signal_flip_event(); the intermediate add looks redundant but is
+ * kept so teardown (which walks flip_wait_list) stays symmetric with
+ * the sync-event path — confirm before simplifying. Both operations
+ * happen under event_lock. */
+ list_add_tail(&e->base.link, &flip_wait_list);
+ pvr_signal_flip_event(e, &now);
+
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ return 0;
+}
+
static bool pvr_dequeue_event(struct PVRSRV_FILE_PRIVATE_DATA *priv,
size_t total, size_t max, struct pvr_pending_event **out)
{
struct pvr_pending_event *z;
struct pvr_pending_sync_event *e;
struct pvr_pending_sync_event *t;
+ struct pvr_pending_flip_event *e2;
+ struct pvr_pending_flip_event *t2;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
e->base.destroy(&e->base);
}
+ list_for_each_entry_safe(e2, t2, &flip_wait_list, base.link)
+ if (e2->base.file_priv == priv) {
+ list_del(&e2->base.link);
+ e2->base.destroy(&e2->base);
+ }
+
/* Remove unconsumed events */
list_for_each_entry_safe(w, z, &priv->event_list, link)
w->destroy(w);
{
spin_lock_init(&event_lock);
INIT_LIST_HEAD(&sync_wait_list);
+ INIT_LIST_HEAD(&flip_wait_list);
}
};
#define PVR_EVENT_SYNC 0x01
+/* Event type: a display flip completed (struct pvr_event_flip payload). */
+#define PVR_EVENT_FLIP 0x02
/*
* Every buffer used as a render target has a 'PVRSRV_KERNEL_SYNC_INFO'
__u32 tv_usec;
};
+/*
+ * Userspace-visible payload for PVR_EVENT_FLIP, read via pvr_read().
+ * Field order/types are ABI — do not reorder. Layout mirrors
+ * struct pvr_event_sync above.
+ */
+struct pvr_event_flip {
+ struct pvr_event base;
+ __u64 user_data; /* cookie supplied by the requester, echoed back */
+ __u32 tv_sec; /* completion time (seconds) */
+ __u32 tv_usec; /* completion time (microseconds) */
+ __u32 overlay; /* overlay the flip applies to */
+};
+
+
/* Event queued up for userspace to read */
struct pvr_pending_event {
struct pvr_event *event;
struct pvr_event_sync event;
};
+/* Kernel-side wrapper pairing a queued flip event with its bookkeeping;
+ * base.event points at event.base (see pvr_flip_event_req()). */
+struct pvr_pending_flip_event {
+ struct pvr_pending_event base;
+ struct pvr_event_flip event;
+};
+
+
void pvr_init_events(void);
int pvr_sync_event_req(struct PVRSRV_FILE_PRIVATE_DATA *priv,
const struct PVRSRV_KERNEL_SYNC_INFO *sync_info,
u64 user_data);
+/* Queue and signal a PVR_EVENT_FLIP for this client; 0 or -ENOMEM. */
+int pvr_flip_event_req(struct PVRSRV_FILE_PRIVATE_DATA *priv,
+ unsigned int overlay, u64 user_data);
ssize_t pvr_read(struct file *filp, char __user *buf, size_t count,
loff_t *off);
unsigned int pvr_poll(struct file *filp, struct poll_table_struct *wait);