4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * Implements upper edge functions for Bridge message module.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 #include <linux/types.h>
20 /* ----------------------------------- DSP/BIOS Bridge */
21 #include <dspbridge/dbdefs.h>
23 /* ----------------------------------- Trace & Debug */
24 #include <dspbridge/dbc.h>
26 /* ----------------------------------- OS Adaptation Layer */
27 #include <dspbridge/sync.h>
29 /* ----------------------------------- Platform Manager */
30 #include <dspbridge/dev.h>
32 /* ----------------------------------- Others */
33 #include <dspbridge/io_sm.h>
35 /* ----------------------------------- This */
37 #include <dspbridge/dspmsg.h>
39 /* ----------------------------------- Function Prototypes */
40 static int add_new_msg(struct list_head *msg_list);
41 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
42 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
43 static void free_msg_list(struct list_head *msg_list);
/*
 * NOTE(review): this extracted view is missing several original lines
 * (function braces, early-return bodies, allocation-failure cleanup).
 * Code lines below are left byte-identical; only comments were added.
 */
46 * ======== bridge_msg_create ========
47 * Create an object to manage message queues. Only one of these objects
48 * can exist per device object.
/*
 * Returns 0 on success (presumably -EINVAL / -ENOMEM on the truncated
 * error paths — TODO confirm against the full file). On success the new
 * manager is stored through *msg_man; msg_callback is saved as the
 * manager's on_exit hook and the device's io_mgr is cached for later
 * DPC scheduling by bridge_msg_put().
 */
50 int bridge_msg_create(struct msg_mgr **msg_man,
51 struct dev_object *hdev_obj,
52 msg_onexit msg_callback)
54 struct msg_mgr *msg_mgr_obj;
55 struct io_mgr *hio_mgr;
/* Validate all three inputs; the reject branch body is not visible here. */
58 if (!msg_man || !msg_callback || !hdev_obj)
61 dev_get_io_mgr(hdev_obj, &hio_mgr);
66 /* Allocate msg_ctrl manager object */
67 msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
/* NOTE(review): kzalloc-failure check is among the truncated lines. */
71 msg_mgr_obj->on_exit = msg_callback;
72 msg_mgr_obj->iomgr = hio_mgr;
73 /* List of MSG_QUEUEs */
74 INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
76 * Queues of message frames for messages to the DSP. Message
77 * frames will only be added to the free queue when a
78 * msg_queue object is created.
80 INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
81 INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
/* Single lock protecting all manager and per-queue frame lists. */
82 spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
85 * Create an event to be used by bridge_msg_put() in waiting
86 * for an available free frame from the message manager.
88 msg_mgr_obj->sync_event =
89 kzalloc(sizeof(struct sync_object), GFP_KERNEL);
90 if (!msg_mgr_obj->sync_event) {
94 sync_init_event(msg_mgr_obj->sync_event);
/* Publish the fully initialized manager to the caller. */
96 *msg_man = msg_mgr_obj;
/*
 * NOTE(review): braces, returns and the error-unwind paths between the
 * visible lines are missing from this extracted view; code lines are
 * kept byte-identical and only comments were added.
 */
102 * ======== bridge_msg_create_queue ========
103 * Create a msg_queue for sending/receiving messages to/from a node
/*
 * Allocates the queue object, its sync/notification objects, and
 * pre-allocates max_msgs message frames on BOTH the manager's free list
 * (to-DSP direction) and the queue's own free list (from-DSP direction).
 * On failure, delete_msg_queue() below tears down the partial object.
 */
106 int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, struct msg_queue **msgq,
107 u32 msgq_id, u32 max_msgs, void *arg)
110 u32 num_allocated = 0;
111 struct msg_queue *msg_q;
114 if (!hmsg_mgr || msgq == NULL)
118 /* Allocate msg_queue object */
119 msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
123 msg_q->max_msgs = max_msgs;
124 msg_q->msg_mgr = hmsg_mgr;
125 msg_q->arg = arg; /* Node handle */
126 msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
127 /* Queues of Message frames for messages from the DSP */
128 INIT_LIST_HEAD(&msg_q->msg_free_list);
129 INIT_LIST_HEAD(&msg_q->msg_used_list);
131 /* Create event that will be signalled when a message from
132 * the DSP is available. */
133 msg_q->sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
134 if (!msg_q->sync_event) {
139 sync_init_event(msg_q->sync_event);
141 /* Create a notification list for message ready notification. */
142 msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
143 if (!msg_q->ntfy_obj) {
147 ntfy_init(msg_q->ntfy_obj);
149 /* Create events that will be used to synchronize cleanup
150 * when the object is deleted. sync_done will be set to
151 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
152 * will be set by the unblocked thread to signal that it
153 * is unblocked and will no longer reference the object. */
154 msg_q->sync_done = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
155 if (!msg_q->sync_done) {
159 sync_init_event(msg_q->sync_done);
161 msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
162 if (!msg_q->sync_done_ack) {
166 sync_init_event(msg_q->sync_done_ack);
168 /* Enter critical section */
169 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
170 /* Initialize message frames and put in appropriate queues */
/* add_new_msg() uses GFP_ATOMIC, so allocating under the lock is legal. */
171 for (i = 0; i < max_msgs && !status; i++) {
172 status = add_new_msg(&hmsg_mgr->msg_free_list);
175 status = add_new_msg(&msg_q->msg_free_list);
/* NOTE(review): the failure branch dropping the lock sits between these
 * truncated lines; num_allocated is presumably updated in the loop —
 * TODO confirm against the full file. */
179 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Success path: make the queue visible on the manager's queue_list. */
183 list_add_tail(&msg_q->list_elem, &hmsg_mgr->queue_list);
185 /* Signal that free frames are now available */
186 if (!list_empty(&hmsg_mgr->msg_free_list))
187 sync_set_event(hmsg_mgr->sync_event);
189 /* Exit critical section */
190 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Error path: undo the num_allocated frames and free the queue object. */
194 delete_msg_queue(msg_q, num_allocated);
199 * ======== bridge_msg_delete ========
200 * Delete a msg_ctrl manager allocated in bridge_msg_create().
/* Thin wrapper: all teardown is delegated to delete_msg_mgr(). */
202 void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
205 delete_msg_mgr(hmsg_mgr);
209 * ======== bridge_msg_delete_queue ========
210 * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
/*
 * Teardown handshake: mark the queue done, then set sync_done to wake
 * each thread blocked in bridge_msg_get()/bridge_msg_put(); each woken
 * thread decrements io_msg_pend and sets sync_done_ack before it stops
 * touching the object. Only when io_msg_pend reaches zero is the queue
 * unlinked and freed.
 * NOTE(review): io_msg_pend is read here without holding msg_mgr_lock —
 * visible as-is in this view; the blocked threads update it under the
 * lock (see bridge_msg_get/put).
 */
212 void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
214 struct msg_mgr *hmsg_mgr;
217 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
220 hmsg_mgr = msg_queue_obj->msg_mgr;
/* From here on, get/put treat the queue as dying and bail out. */
221 msg_queue_obj->done = true;
222 /* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
223 io_msg_pend = msg_queue_obj->io_msg_pend;
224 while (io_msg_pend) {
226 sync_set_event(msg_queue_obj->sync_done);
227 /* Wait for acknowledgement */
228 sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
229 io_msg_pend = msg_queue_obj->io_msg_pend;
231 /* Remove message queue from hmsg_mgr->queue_list */
232 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
233 list_del(&msg_queue_obj->list_elem);
234 /* Free the message queue object */
/* Also reclaims this queue's max_msgs frames from the manager free list. */
235 delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
236 if (list_empty(&hmsg_mgr->msg_free_list))
237 sync_reset_event(hmsg_mgr->sync_event);
238 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
242 * ======== bridge_msg_get ========
243 * Get a message from a msg_ctrl queue.
/*
 * Fast path: if a frame is already on this queue's used list, copy its
 * payload into *pmsg, recycle the frame to the queue's free list, and
 * return while clearing sync_event if the used list drained.
 * Slow path: bump io_msg_pend, drop the lock, and block on either
 * sync_event (message arrived) or sync_done (queue being deleted) for
 * up to utimeout. A woken thread that finds done set must ack via
 * sync_done_ack so bridge_msg_delete_queue() can proceed.
 * NOTE(review): returns/braces on the guard and exit paths are missing
 * from this extracted view; code lines kept byte-identical.
 */
245 int bridge_msg_get(struct msg_queue *msg_queue_obj,
246 struct dsp_msg *pmsg, u32 utimeout)
248 struct msg_frame *msg_frame_obj;
249 struct msg_mgr *hmsg_mgr;
250 struct sync_object *syncs[2];
254 if (!msg_queue_obj || pmsg == NULL)
257 hmsg_mgr = msg_queue_obj->msg_mgr;
259 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
260 /* If a message is already there, get it */
261 if (!list_empty(&msg_queue_obj->msg_used_list)) {
262 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
263 struct msg_frame, list_elem);
264 list_del(&msg_frame_obj->list_elem);
265 *pmsg = msg_frame_obj->msg_data.msg;
266 list_add_tail(&msg_frame_obj->list_elem,
267 &msg_queue_obj->msg_free_list);
268 if (list_empty(&msg_queue_obj->msg_used_list))
269 sync_reset_event(msg_queue_obj->sync_event);
270 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Queue is being torn down: do not block, just bail out. */
274 if (msg_queue_obj->done) {
275 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Register as a blocked reader before releasing the lock. */
278 msg_queue_obj->io_msg_pend++;
279 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
282 * Wait til message is available, timeout, or done. We don't
283 * have to schedule the DPC, since the DSP will send messages
284 * when they are available.
286 syncs[0] = msg_queue_obj->sync_event;
287 syncs[1] = msg_queue_obj->sync_done;
288 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
290 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
291 if (msg_queue_obj->done) {
292 msg_queue_obj->io_msg_pend--;
293 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
295 * Signal that we're not going to access msg_queue_obj
296 * anymore, so it can be deleted.
298 sync_set_event(msg_queue_obj->sync_done_ack);
301 if (!status && !list_empty(&msg_queue_obj->msg_used_list)) {
302 /* Get msg from used list */
303 msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
304 struct msg_frame, list_elem);
305 list_del(&msg_frame_obj->list_elem);
306 /* Copy message into pmsg and put frame on the free list */
307 *pmsg = msg_frame_obj->msg_data.msg;
308 list_add_tail(&msg_frame_obj->list_elem,
309 &msg_queue_obj->msg_free_list);
311 msg_queue_obj->io_msg_pend--;
312 /* Re-arm (set) the event if there are still queued messages */
313 if (!list_empty(&msg_queue_obj->msg_used_list))
314 sync_set_event(msg_queue_obj->sync_event);
316 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
322 * ======== bridge_msg_put ========
323 * Put a message onto a msg_ctrl queue.
/*
 * Fast path: take a frame from the MANAGER's free list (to-DSP frames
 * are shared across queues), copy *pmsg plus this queue's msgq_id into
 * it, move it to the manager's used list, then drop the lock and kick
 * iosm_schedule() so the I/O DPC performs the actual transfer.
 * Slow path mirrors bridge_msg_get(): bump io_msg_pend, wait on the
 * manager's sync_event (free frame) or the queue's sync_done (deletion)
 * for up to utimeout, then re-check under the lock.
 * NOTE(review): returns/braces on several paths are missing from this
 * extracted view; code lines kept byte-identical.
 */
325 int bridge_msg_put(struct msg_queue *msg_queue_obj,
326 const struct dsp_msg *pmsg, u32 utimeout)
328 struct msg_frame *msg_frame_obj;
329 struct msg_mgr *hmsg_mgr;
330 struct sync_object *syncs[2];
334 if (!msg_queue_obj || !pmsg || !msg_queue_obj->msg_mgr)
337 hmsg_mgr = msg_queue_obj->msg_mgr;
339 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
341 /* If a message frame is available, use it */
342 if (!list_empty(&hmsg_mgr->msg_free_list)) {
343 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
344 struct msg_frame, list_elem);
345 list_del(&msg_frame_obj->list_elem);
346 msg_frame_obj->msg_data.msg = *pmsg;
347 msg_frame_obj->msg_data.msgq_id =
348 msg_queue_obj->msgq_id;
349 list_add_tail(&msg_frame_obj->list_elem,
350 &hmsg_mgr->msg_used_list);
351 hmsg_mgr->msgs_pending++;
/* Last free frame consumed: block future putters until one returns. */
353 if (list_empty(&hmsg_mgr->msg_free_list))
354 sync_reset_event(hmsg_mgr->sync_event);
356 /* Release critical section before scheduling DPC */
357 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
358 /* Schedule a DPC, to do the actual data transfer: */
359 iosm_schedule(hmsg_mgr->iomgr);
/* Queue is being torn down: do not block, just bail out. */
363 if (msg_queue_obj->done) {
364 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
/* Register as a blocked writer before releasing the lock. */
367 msg_queue_obj->io_msg_pend++;
369 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
371 /* Wait til a free message frame is available, timeout, or done */
372 syncs[0] = hmsg_mgr->sync_event;
373 syncs[1] = msg_queue_obj->sync_done;
374 status = sync_wait_on_multiple_events(syncs, 2, utimeout, &index);
378 /* Enter critical section */
379 spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
380 if (msg_queue_obj->done) {
381 msg_queue_obj->io_msg_pend--;
382 /* Exit critical section */
383 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
385 * Signal that we're not going to access msg_queue_obj
386 * anymore, so it can be deleted.
388 sync_set_event(msg_queue_obj->sync_done_ack);
/* Raced with another putter: list drained again after the wakeup. */
392 if (list_empty(&hmsg_mgr->msg_free_list)) {
393 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
397 /* Get msg from free list */
398 msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
399 struct msg_frame, list_elem);
401 * Copy the caller's message into the frame and move it to the
404 list_del(&msg_frame_obj->list_elem);
405 msg_frame_obj->msg_data.msg = *pmsg;
406 msg_frame_obj->msg_data.msgq_id = msg_queue_obj->msgq_id;
407 list_add_tail(&msg_frame_obj->list_elem, &hmsg_mgr->msg_used_list);
408 hmsg_mgr->msgs_pending++;
410 * Schedule a DPC, to do the actual
413 iosm_schedule(hmsg_mgr->iomgr);
415 msg_queue_obj->io_msg_pend--;
416 /* Re-arm (set) the event if there are still frames available */
417 if (!list_empty(&hmsg_mgr->msg_free_list))
418 sync_set_event(hmsg_mgr->sync_event);
420 /* Exit critical section */
421 spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
427 * ======== bridge_msg_register_notify ========
/*
 * Register (event_mask == DSP_NODEMESSAGEREADY) or unregister
 * (event_mask == 0) a message-ready notification for this queue.
 * Only DSP_SIGNALEVENT is accepted as notify_type; any other mask or
 * type is rejected on the (truncated) error branches below.
 */
429 int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
430 u32 event_mask, u32 notify_type,
431 struct dsp_notification *hnotification)
435 if (!msg_queue_obj || !hnotification) {
440 if (!(event_mask == DSP_NODEMESSAGEREADY || event_mask == 0)) {
445 if (notify_type != DSP_SIGNALEVENT) {
451 status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
452 event_mask, notify_type);
/* event_mask == 0 means "remove this notification". */
454 status = ntfy_unregister(msg_queue_obj->ntfy_obj,
/* Treat "was never registered" as success, per the comment below. */
457 if (status == -EINVAL) {
458 /* Not registered. Ok, since we couldn't have known. Node
459 * notifications are split between node state change handled
460 * by NODE, and message ready handled by msg_ctrl. */
468 * ======== bridge_msg_set_queue_id ========
/*
 * Late-bind the DSP-side node environment id onto an already-created
 * queue; simple unsynchronized store, see rationale below.
 */
470 void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
473 * A message queue must be created when a node is allocated,
474 * so that node_register_notify() can be called before the node
475 * is created. Since we don't know the node environment until the
476 * node is created, we need this function to set msg_queue_obj->msgq_id
477 * to the node environment, after the node is created.
480 msg_queue_obj->msgq_id = msgq_id;
484 * ======== add_new_msg ========
485 * Must be called in message manager critical section.
/*
 * Allocate one zeroed msg_frame and append it to msg_list. GFP_ATOMIC
 * is required because callers hold msg_mgr_lock (spin_lock_bh).
 * NOTE(review): the kzalloc-failure return is among the lines missing
 * from this extracted view.
 */
487 static int add_new_msg(struct list_head *msg_list)
489 struct msg_frame *pmsg;
491 pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
495 list_add_tail(&pmsg->list_elem, msg_list);
501 * ======== delete_msg_mgr ========
/*
 * Free all frames on the manager's free and used lists, its sync_event,
 * and (on the truncated trailing lines, presumably) the manager itself —
 * TODO confirm against the full file. The null-check guard is also not
 * visible in this view.
 */
503 static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
508 /* FIXME: free elements from queue_list? */
509 free_msg_list(&hmsg_mgr->msg_free_list);
510 free_msg_list(&hmsg_mgr->msg_used_list);
511 kfree(hmsg_mgr->sync_event);
516 * ======== delete_msg_queue ========
/*
 * Destroy a msg_queue: reclaim num_to_dsp frames this queue contributed
 * to the MANAGER's free list, free the queue's own frame lists, delete
 * the notification object, free the three sync objects, and finally the
 * queue itself. Callers pass num_to_dsp = frames actually allocated
 * (num_allocated on the create-error path, max_msgs on normal delete).
 */
518 static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
520 struct msg_mgr *hmsg_mgr;
521 struct msg_frame *pmsg, *tmp;
524 if (!msg_queue_obj || !msg_queue_obj->msg_mgr)
527 hmsg_mgr = msg_queue_obj->msg_mgr;
529 /* Pull off num_to_dsp message frames from Msg manager and free */
/* NOTE(review): the kfree(pmsg) inside this loop and the loop's brace
 * lines are missing from this extracted view. */
531 list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
533 list_del(&pmsg->list_elem);
535 if (i++ >= num_to_dsp)
539 free_msg_list(&msg_queue_obj->msg_free_list);
540 free_msg_list(&msg_queue_obj->msg_used_list);
/* ntfy_obj may be NULL if creation failed before ntfy_init(). */
542 if (msg_queue_obj->ntfy_obj) {
543 ntfy_delete(msg_queue_obj->ntfy_obj);
544 kfree(msg_queue_obj->ntfy_obj);
/* kfree(NULL) is a no-op, so partially-created queues are safe here. */
547 kfree(msg_queue_obj->sync_event);
548 kfree(msg_queue_obj->sync_done);
549 kfree(msg_queue_obj->sync_done_ack);
551 kfree(msg_queue_obj);
555 * ======== free_msg_list ========
557 static void free_msg_list(struct list_head *msg_list)
559 struct msg_frame *pmsg, *tmp;
564 list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
565 list_del(&pmsg->list_elem);