Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6
[pandora-kernel.git] / drivers / staging / tidspbridge / core / chnl_sm.c
1 /*
2  * chnl_sm.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Implements upper edge functions for Bridge driver channel module.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18
19 /*
20  *      The lower edge functions must be implemented by the Bridge driver
21  *      writer, and are declared in chnl_sm.h.
22  *
23  *      Care is taken in this code to prevent simultaneous access to channel
24  *      queues from
25  *      1. Threads.
26  *      2. io_dpc(), scheduled from the io_isr() as an event.
27  *
28  *      This is done primarily by:
29  *      - Semaphores.
30  *      - state flags in the channel object; and
31  *      - ensuring the IO_Dispatch() routine, which is called from both
32  *        CHNL_AddIOReq() and the DPC(if implemented), is not re-entered.
33  *
34  *  Channel Invariant:
35  *      There is an important invariant condition which must be maintained per
36  *      channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
37  *      which may cause timeouts and/or failure of function sync_wait_on_event.
38  *      This invariant condition is:
39  *
40  *          LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
41  *      and
42  *          !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
43  */
44
45 #include <linux/types.h>
46
47 /*  ----------------------------------- OS */
48 #include <dspbridge/host_os.h>
49
50 /*  ----------------------------------- DSP/BIOS Bridge */
51 #include <dspbridge/dbdefs.h>
52
53 /*  ----------------------------------- Trace & Debug */
54 #include <dspbridge/dbc.h>
55
56 /*  ----------------------------------- OS Adaptation Layer */
57 #include <dspbridge/sync.h>
58
59 /*  ----------------------------------- Bridge Driver */
60 #include <dspbridge/dspdefs.h>
61 #include <dspbridge/dspchnl.h>
62 #include "_tiomap.h"
63
64 /*  ----------------------------------- Platform Manager */
65 #include <dspbridge/dev.h>
66
67 /*  ----------------------------------- Others */
68 #include <dspbridge/io_sm.h>
69
70 /*  ----------------------------------- Define for This */
71 #define USERMODE_ADDR   PAGE_OFFSET
72
73 #define MAILBOX_IRQ INT_MAIL_MPU_IRQ
74
75 /*  ----------------------------------- Function Prototypes */
76 static struct lst_list *create_chirp_list(u32 chirps);
77
78 static void free_chirp_list(struct lst_list *chirp_list);
79
80 static struct chnl_irp *make_new_chirp(void);
81
82 static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
83                                       u32 *chnl);
84
85 /*
86  *  ======== bridge_chnl_add_io_req ========
87  *      Enqueue an I/O request for data transfer on a channel to the DSP.
88  *      The direction (mode) is specified in the channel object. Note the DSP
89  *      address is specified for channels opened in direct I/O mode.
90  */
91 int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
92                                u32 byte_size, u32 buf_size,
93                                u32 dw_dsp_addr, u32 dw_arg)
94 {
95         int status = 0;
96         struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
97         struct chnl_irp *chnl_packet_obj = NULL;
98         struct bridge_dev_context *dev_ctxt;
99         struct dev_object *dev_obj;
100         u8 dw_state;
101         bool is_eos;
102         struct chnl_mgr *chnl_mgr_obj = pchnl->chnl_mgr_obj;
103         u8 *host_sys_buf = NULL;
104         bool sched_dpc = false;
105         u16 mb_val = 0;
106
107         is_eos = (byte_size == 0);
108
109         /* Validate args */
110         if (!host_buf || !pchnl) {
111                 status = -EFAULT;
112         } else if (is_eos && CHNL_IS_INPUT(pchnl->chnl_mode)) {
113                 status = -EPERM;
114         } else {
115                 /*
116                  * Check the channel state: only queue chirp if channel state
117                  * allows it.
118                  */
119                 dw_state = pchnl->dw_state;
120                 if (dw_state != CHNL_STATEREADY) {
121                         if (dw_state & CHNL_STATECANCEL)
122                                 status = -ECANCELED;
123                         else if ((dw_state & CHNL_STATEEOS) &&
124                                  CHNL_IS_OUTPUT(pchnl->chnl_mode))
125                                 status = -EPIPE;
126                         else
127                                 /* No other possible states left */
128                                 DBC_ASSERT(0);
129                 }
130         }
131
132         dev_obj = dev_get_first();
133         dev_get_bridge_context(dev_obj, &dev_ctxt);
134         if (!dev_ctxt)
135                 status = -EFAULT;
136
137         if (status)
138                 goto func_end;
139
140         if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1 && host_buf) {
141                 if (!(host_buf < (void *)USERMODE_ADDR)) {
142                         host_sys_buf = host_buf;
143                         goto func_cont;
144                 }
145                 /* if addr in user mode, then copy to kernel space */
146                 host_sys_buf = kmalloc(buf_size, GFP_KERNEL);
147                 if (host_sys_buf == NULL) {
148                         status = -ENOMEM;
149                         goto func_end;
150                 }
151                 if (CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
152                         status = copy_from_user(host_sys_buf, host_buf,
153                                                 buf_size);
154                         if (status) {
155                                 kfree(host_sys_buf);
156                                 host_sys_buf = NULL;
157                                 status = -EFAULT;
158                                 goto func_end;
159                         }
160                 }
161         }
162 func_cont:
163         /* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
164          * channels. DPCCS is held to avoid race conditions with PCPY channels.
165          * If DPC is scheduled in process context (iosm_schedule) and any
166          * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
167          * we disable ALL DPCs. We will try to disable ONLY IO DPC later. */
168         spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
169         omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
170         if (pchnl->chnl_type == CHNL_PCPY) {
171                 /* This is a processor-copy channel. */
172                 if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
173                         /* Check buffer size on output channels for fit. */
174                         if (byte_size >
175                             io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
176                                 status = -EINVAL;
177
178                 }
179         }
180         if (!status) {
181                 /* Get a free chirp: */
182                 chnl_packet_obj =
183                     (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
184                 if (chnl_packet_obj == NULL)
185                         status = -EIO;
186
187         }
188         if (!status) {
189                 /* Enqueue the chirp on the chnl's IORequest queue: */
190                 chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
191                     host_buf;
192                 if (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)
193                         chnl_packet_obj->host_sys_buf = host_sys_buf;
194
195                 /*
196                  * Note: for dma chans dw_dsp_addr contains dsp address
197                  * of SM buffer.
198                  */
199                 DBC_ASSERT(chnl_mgr_obj->word_size != 0);
200                 /* DSP address */
201                 chnl_packet_obj->dsp_tx_addr =
202                     dw_dsp_addr / chnl_mgr_obj->word_size;
203                 chnl_packet_obj->byte_size = byte_size;
204                 chnl_packet_obj->buf_size = buf_size;
205                 /* Only valid for output channel */
206                 chnl_packet_obj->dw_arg = dw_arg;
207                 chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
208                                            CHNL_IOCSTATCOMPLETE);
209                 lst_put_tail(pchnl->pio_requests,
210                              (struct list_head *)chnl_packet_obj);
211                 pchnl->cio_reqs++;
212                 DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
213                 /*
214                  * If end of stream, update the channel state to prevent
215                  * more IOR's.
216                  */
217                 if (is_eos)
218                         pchnl->dw_state |= CHNL_STATEEOS;
219
220                 /* Legacy DSM Processor-Copy */
221                 DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
222                 /* Request IO from the DSP */
223                 io_request_chnl(chnl_mgr_obj->hio_mgr, pchnl,
224                                 (CHNL_IS_INPUT(pchnl->chnl_mode) ? IO_INPUT :
225                                  IO_OUTPUT), &mb_val);
226                 sched_dpc = true;
227
228         }
229         omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
230         spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
231         if (mb_val != 0)
232                 sm_interrupt_dsp(dev_ctxt, mb_val);
233
234         /* Schedule a DPC, to do the actual data transfer */
235         if (sched_dpc)
236                 iosm_schedule(chnl_mgr_obj->hio_mgr);
237
238 func_end:
239         return status;
240 }
241
/*
 *  ======== bridge_chnl_cancel_io ========
 *      Return all I/O requests to the client which have not yet been
 *      transferred.  The channel's I/O completion object is
 *      signalled, and all the I/O requests are queued as IOC's, with the
 *      status field set to CHNL_IOCSTATCANCEL.
 *      This call is typically used in abort situations, and is a prelude to
 *      chnl_close();
 *
 *      Returns 0 on success, -EFAULT if chnl_obj or its manager is NULL.
 */
int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	u32 chnl_id = -1;
	s8 chnl_mode;
	struct chnl_irp *chnl_packet_obj;
	struct chnl_mgr *chnl_mgr_obj = NULL;

	/* Check args: */
	if (pchnl && pchnl->chnl_mgr_obj) {
		chnl_id = pchnl->chnl_id;
		chnl_mode = pchnl->chnl_mode;
		chnl_mgr_obj = pchnl->chnl_mgr_obj;
	} else {
		status = -EFAULT;
	}
	if (status)
		goto func_end;

	/*  Mark this channel as cancelled, to prevent further IORequests
	 *  or dispatching. */
	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
	pchnl->dw_state |= CHNL_STATECANCEL;
	/* Nothing queued: just the state change is needed. */
	if (LST_IS_EMPTY(pchnl->pio_requests))
		goto func_cont;

	if (pchnl->chnl_type == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IS_INPUT(pchnl->chnl_mode)) {
			io_cancel_chnl(chnl_mgr_obj->hio_mgr, chnl_id);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
		}
	}
	/* Move all IOR's to IOC queue: each request completes with zero
	 * bytes transferred and CHNL_IOCSTATCANCEL set, so the client can
	 * reclaim its buffers via bridge_chnl_get_ioc(). */
	while (!LST_IS_EMPTY(pchnl->pio_requests)) {
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
		if (chnl_packet_obj) {
			chnl_packet_obj->byte_size = 0;
			chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
			lst_put_tail(pchnl->pio_completions,
				     (struct list_head *)chnl_packet_obj);
			pchnl->cio_cs++;
			pchnl->cio_reqs--;
			DBC_ASSERT(pchnl->cio_reqs >= 0);
		}
	}
func_cont:
	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
func_end:
	return status;
}
307
308 /*
309  *  ======== bridge_chnl_close ========
310  *  Purpose:
311  *      Ensures all pending I/O on this channel is cancelled, discards all
312  *      queued I/O completion notifications, then frees the resources allocated
313  *      for this channel, and makes the corresponding logical channel id
314  *      available for subsequent use.
315  */
316 int bridge_chnl_close(struct chnl_object *chnl_obj)
317 {
318         int status;
319         struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
320
321         /* Check args: */
322         if (!pchnl) {
323                 status = -EFAULT;
324                 goto func_cont;
325         }
326         {
327                 /* Cancel IO: this ensures no further IO requests or
328                  * notifications. */
329                 status = bridge_chnl_cancel_io(chnl_obj);
330         }
331 func_cont:
332         if (!status) {
333                 /* Assert I/O on this channel is now cancelled: Protects
334                  * from io_dpc. */
335                 DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
336                 /* Invalidate channel object: Protects from
337                  * CHNL_GetIOCompletion(). */
338                 /* Free the slot in the channel manager: */
339                 pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
340                 spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
341                 pchnl->chnl_mgr_obj->open_channels -= 1;
342                 spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
343                 if (pchnl->ntfy_obj) {
344                         ntfy_delete(pchnl->ntfy_obj);
345                         kfree(pchnl->ntfy_obj);
346                         pchnl->ntfy_obj = NULL;
347                 }
348                 /* Reset channel event: (NOTE: user_event freed in user
349                  * context.). */
350                 if (pchnl->sync_event) {
351                         sync_reset_event(pchnl->sync_event);
352                         kfree(pchnl->sync_event);
353                         pchnl->sync_event = NULL;
354                 }
355                 /* Free I/O request and I/O completion queues: */
356                 if (pchnl->pio_completions) {
357                         free_chirp_list(pchnl->pio_completions);
358                         pchnl->pio_completions = NULL;
359                         pchnl->cio_cs = 0;
360                 }
361                 if (pchnl->pio_requests) {
362                         free_chirp_list(pchnl->pio_requests);
363                         pchnl->pio_requests = NULL;
364                         pchnl->cio_reqs = 0;
365                 }
366                 if (pchnl->free_packets_list) {
367                         free_chirp_list(pchnl->free_packets_list);
368                         pchnl->free_packets_list = NULL;
369                 }
370                 /* Release channel object. */
371                 kfree(pchnl);
372                 pchnl = NULL;
373         }
374         DBC_ENSURE(status || !pchnl);
375         return status;
376 }
377
378 /*
379  *  ======== bridge_chnl_create ========
380  *      Create a channel manager object, responsible for opening new channels
381  *      and closing old ones for a given board.
382  */
383 int bridge_chnl_create(struct chnl_mgr **channel_mgr,
384                               struct dev_object *hdev_obj,
385                               const struct chnl_mgrattrs *mgr_attrts)
386 {
387         int status = 0;
388         struct chnl_mgr *chnl_mgr_obj = NULL;
389         u8 max_channels;
390
391         /* Check DBC requirements: */
392         DBC_REQUIRE(channel_mgr != NULL);
393         DBC_REQUIRE(mgr_attrts != NULL);
394         DBC_REQUIRE(mgr_attrts->max_channels > 0);
395         DBC_REQUIRE(mgr_attrts->max_channels <= CHNL_MAXCHANNELS);
396         DBC_REQUIRE(mgr_attrts->word_size != 0);
397
398         /* Allocate channel manager object */
399         chnl_mgr_obj = kzalloc(sizeof(struct chnl_mgr), GFP_KERNEL);
400         if (chnl_mgr_obj) {
401                 /*
402                  * The max_channels attr must equal the # of supported chnls for
403                  * each transport(# chnls for PCPY = DDMA = ZCPY): i.e.
404                  *      mgr_attrts->max_channels = CHNL_MAXCHANNELS =
405                  *                       DDMA_MAXDDMACHNLS = DDMA_MAXZCPYCHNLS.
406                  */
407                 DBC_ASSERT(mgr_attrts->max_channels == CHNL_MAXCHANNELS);
408                 max_channels = CHNL_MAXCHANNELS + CHNL_MAXCHANNELS * CHNL_PCPY;
409                 /* Create array of channels */
410                 chnl_mgr_obj->ap_channel = kzalloc(sizeof(struct chnl_object *)
411                                                 * max_channels, GFP_KERNEL);
412                 if (chnl_mgr_obj->ap_channel) {
413                         /* Initialize chnl_mgr object */
414                         chnl_mgr_obj->dw_type = CHNL_TYPESM;
415                         chnl_mgr_obj->word_size = mgr_attrts->word_size;
416                         /* Total # chnls supported */
417                         chnl_mgr_obj->max_channels = max_channels;
418                         chnl_mgr_obj->open_channels = 0;
419                         chnl_mgr_obj->dw_output_mask = 0;
420                         chnl_mgr_obj->dw_last_output = 0;
421                         chnl_mgr_obj->hdev_obj = hdev_obj;
422                         spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
423                 } else {
424                         status = -ENOMEM;
425                 }
426         } else {
427                 status = -ENOMEM;
428         }
429
430         if (status) {
431                 bridge_chnl_destroy(chnl_mgr_obj);
432                 *channel_mgr = NULL;
433         } else {
434                 /* Return channel manager object to caller... */
435                 *channel_mgr = chnl_mgr_obj;
436         }
437         return status;
438 }
439
/*
 *  ======== bridge_chnl_destroy ========
 *  Purpose:
 *      Close all open channels, and destroy the channel manager.
 *
 *      Returns -EFAULT if hchnl_mgr is NULL; otherwise the status of the
 *      last bridge_chnl_close() call (see NOTE below).
 */
int bridge_chnl_destroy(struct chnl_mgr *hchnl_mgr)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	u32 chnl_id;

	if (hchnl_mgr) {
		/* Close all open channels: */
		/* NOTE(review): every slot is passed to bridge_chnl_close(),
		 * including empty (NULL) ones, which return -EFAULT; the
		 * value returned by this function is simply that of the
		 * LAST close attempt. Confirm callers ignore this return
		 * value before relying on it. */
		for (chnl_id = 0; chnl_id < chnl_mgr_obj->max_channels;
		     chnl_id++) {
			status =
			    bridge_chnl_close(chnl_mgr_obj->ap_channel
					      [chnl_id]);
			if (status)
				dev_dbg(bridge, "%s: Error status 0x%x\n",
					__func__, status);
		}

		/* Free channel manager object: */
		kfree(chnl_mgr_obj->ap_channel);

		/* Set hchnl_mgr to NULL in device object. */
		dev_set_chnl_mgr(chnl_mgr_obj->hdev_obj, NULL);
		/* Free this Chnl Mgr object: */
		kfree(hchnl_mgr);
	} else {
		status = -EFAULT;
	}
	return status;
}
475
/*
 *  ======== bridge_chnl_flush_io ========
 *  purpose:
 *      Flushes all the outstanding data requests on a channel.
 *
 *      Output PCPY channels are drained by reclaiming completions (up to
 *      'timeout' per request); all other channels are simply cancelled and
 *      returned to the ready state.
 *
 *      Returns 0 on success, -EFAULT for a NULL channel, -EINVAL for a
 *      zero timeout on an output channel, or -ETIMEDOUT if a completion
 *      timed out while draining.
 */
int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	s8 chnl_mode = -1;
	struct chnl_mgr *chnl_mgr_obj;
	struct chnl_ioc chnl_ioc_obj;
	/* Check args: */
	if (pchnl) {
		/* A zero timeout cannot drain an output channel. */
		if ((timeout == CHNL_IOCNOWAIT)
		    && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
			status = -EINVAL;
		} else {
			chnl_mode = pchnl->chnl_mode;
			chnl_mgr_obj = pchnl->chnl_mgr_obj;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
		/* Note: Currently, if another thread continues to add IO
		 * requests to this channel, this function will continue to
		 * flush all such queued IO requests. */
		if (CHNL_IS_OUTPUT(chnl_mode)
		    && (pchnl->chnl_type == CHNL_PCPY)) {
			/* Wait for IO completions, up to the specified
			 * timeout: */
			while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
				status = bridge_chnl_get_ioc(chnl_obj,
						timeout, &chnl_ioc_obj);
				if (status)
					continue;

				/* A timed-out completion aborts the flush. */
				if (chnl_ioc_obj.status & CHNL_IOCSTATTIMEOUT)
					status = -ETIMEDOUT;

			}
		} else {
			/* Input (or non-PCPY) channel: cancel everything,
			 * then clear the cancel bit so the channel is
			 * usable again. */
			status = bridge_chnl_cancel_io(chnl_obj);
			/* Now, leave the channel in the ready state: */
			pchnl->dw_state &= ~CHNL_STATECANCEL;
		}
	}
	DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));
	return status;
}
527
528 /*
529  *  ======== bridge_chnl_get_info ========
530  *  Purpose:
531  *      Retrieve information related to a channel.
532  */
533 int bridge_chnl_get_info(struct chnl_object *chnl_obj,
534                              struct chnl_info *channel_info)
535 {
536         int status = 0;
537         struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
538         if (channel_info != NULL) {
539                 if (pchnl) {
540                         /* Return the requested information: */
541                         channel_info->hchnl_mgr = pchnl->chnl_mgr_obj;
542                         channel_info->event_obj = pchnl->user_event;
543                         channel_info->cnhl_id = pchnl->chnl_id;
544                         channel_info->dw_mode = pchnl->chnl_mode;
545                         channel_info->bytes_tx = pchnl->bytes_moved;
546                         channel_info->process = pchnl->process;
547                         channel_info->sync_event = pchnl->sync_event;
548                         channel_info->cio_cs = pchnl->cio_cs;
549                         channel_info->cio_reqs = pchnl->cio_reqs;
550                         channel_info->dw_state = pchnl->dw_state;
551                 } else {
552                         status = -EFAULT;
553                 }
554         } else {
555                 status = -EFAULT;
556         }
557         return status;
558 }
559
/*
 *  ======== bridge_chnl_get_ioc ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above): sync_event is
 *      set iff pio_completions is non-empty on exit.
 */
int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
			    struct chnl_ioc *chan_ioc)
{
	int status = 0;
	struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
	struct chnl_irp *chnl_packet_obj;
	int stat_sync;
	bool dequeue_ioc = true;
	struct chnl_ioc ioc = { NULL, 0, 0, 0, 0 };
	u8 *host_sys_buf = NULL;
	struct bridge_dev_context *dev_ctxt;
	struct dev_object *dev_obj;

	/* Check args: */
	if (!chan_ioc || !pchnl) {
		status = -EFAULT;
	} else if (timeout == CHNL_IOCNOWAIT) {
		/* Non-blocking poll with nothing queued is an error. */
		if (LST_IS_EMPTY(pchnl->pio_completions))
			status = -EREMOTEIO;

	}

	dev_obj = dev_get_first();
	dev_get_bridge_context(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = -EFAULT;

	if (status)
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	/* Nothing queued yet and the caller is willing to wait: block on
	 * the channel's sync event until a completion arrives. */
	if (timeout !=
	    CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
		if (timeout == CHNL_IOCINFINITE)
			timeout = SYNC_INFINITE;

		stat_sync = sync_wait_on_event(pchnl->sync_event, timeout);
		if (stat_sync == -ETIME) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			dequeue_ioc = false;
		} else if (stat_sync == -EPERM) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes. */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IS_EMPTY(pchnl->pio_completions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				dequeue_ioc = false;
			}
		}
	}
	/* See comment in AddIOReq: lock + mailbox IRQ off serializes
	 * against io_dpc and other threads. */
	spin_lock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (dequeue_ioc) {
		/* Dequeue IOC and set chan_ioc; */
		DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
		chnl_packet_obj =
		    (struct chnl_irp *)lst_get_head(pchnl->pio_completions);
		/* Update chan_ioc from channel state and chirp: */
		if (chnl_packet_obj) {
			pchnl->cio_cs--;
			/*  If this is a zero-copy channel, then set IOC's pbuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later. */
			{
				host_sys_buf = chnl_packet_obj->host_sys_buf;
				ioc.pbuf = chnl_packet_obj->host_user_buf;
			}
			ioc.byte_size = chnl_packet_obj->byte_size;
			ioc.buf_size = chnl_packet_obj->buf_size;
			ioc.dw_arg = chnl_packet_obj->dw_arg;
			ioc.status |= chnl_packet_obj->status;
			/* Place the used chirp on the free list: */
			lst_put_tail(pchnl->free_packets_list,
				     (struct list_head *)chnl_packet_obj);
		} else {
			ioc.pbuf = NULL;
			ioc.byte_size = 0;
		}
	} else {
		/* Timed out or cancelled: report an empty completion. */
		ioc.pbuf = NULL;
		ioc.byte_size = 0;
		ioc.dw_arg = 0;
		ioc.buf_size = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IS_EMPTY(pchnl->pio_completions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  bridge_chnl_get_ioc. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (timeout == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      sync_set_event(pchnl->sync_event);
		 *  } */
		sync_set_event(pchnl->sync_event);
	} else {
		/* else, if list is empty, ensure event is reset. */
		sync_reset_event(pchnl->sync_event);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	spin_unlock_bh(&pchnl->chnl_mgr_obj->chnl_mgr_lock);
	/* For bounced PCPY channels, copy data back to the user buffer and
	 * release the kernel bounce buffer allocated in AddIOReq. */
	if (dequeue_ioc
	    && (pchnl->chnl_type == CHNL_PCPY && pchnl->chnl_id > 1)) {
		/* Kernel-space buffer: nothing to copy or free. */
		if (!(ioc.pbuf < (void *)USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!host_sys_buf || !ioc.pbuf) {
			status = -EFAULT;
			goto func_cont;
		}
		if (!CHNL_IS_INPUT(pchnl->chnl_mode))
			goto func_cont1;

		/*host_user_buf */
		status = copy_to_user(ioc.pbuf, host_sys_buf, ioc.byte_size);
		if (status) {
			/* An exiting process cannot receive the data;
			 * treat that case as success. */
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = -EFAULT;
func_cont1:
		kfree(host_sys_buf);
	}
func_cont:
	/* Update User's IOC block: */
	*chan_ioc = ioc;
func_end:
	return status;
}
705
706 /*
707  *  ======== bridge_chnl_get_mgr_info ========
708  *      Retrieve information related to the channel manager.
709  */
710 int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
711                                  struct chnl_mgrinfo *mgr_info)
712 {
713         int status = 0;
714         struct chnl_mgr *chnl_mgr_obj = (struct chnl_mgr *)hchnl_mgr;
715
716         if (mgr_info != NULL) {
717                 if (ch_id <= CHNL_MAXCHANNELS) {
718                         if (hchnl_mgr) {
719                                 /* Return the requested information: */
720                                 mgr_info->chnl_obj =
721                                     chnl_mgr_obj->ap_channel[ch_id];
722                                 mgr_info->open_channels =
723                                     chnl_mgr_obj->open_channels;
724                                 mgr_info->dw_type = chnl_mgr_obj->dw_type;
725                                 /* total # of chnls */
726                                 mgr_info->max_channels =
727                                     chnl_mgr_obj->max_channels;
728                         } else {
729                                 status = -EFAULT;
730                         }
731                 } else {
732                         status = -ECHRNG;
733                 }
734         } else {
735                 status = -EFAULT;
736         }
737
738         return status;
739 }
740
741 /*
742  *  ======== bridge_chnl_idle ========
743  *      Idles a particular channel.
744  */
745 int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
746                             bool flush_data)
747 {
748         s8 chnl_mode;
749         struct chnl_mgr *chnl_mgr_obj;
750         int status = 0;
751
752         DBC_REQUIRE(chnl_obj);
753
754         chnl_mode = chnl_obj->chnl_mode;
755         chnl_mgr_obj = chnl_obj->chnl_mgr_obj;
756
757         if (CHNL_IS_OUTPUT(chnl_mode) && !flush_data) {
758                 /* Wait for IO completions, up to the specified timeout: */
759                 status = bridge_chnl_flush_io(chnl_obj, timeout);
760         } else {
761                 status = bridge_chnl_cancel_io(chnl_obj);
762
763                 /* Reset the byte count and put channel back in ready state. */
764                 chnl_obj->bytes_moved = 0;
765                 chnl_obj->dw_state &= ~CHNL_STATECANCEL;
766         }
767
768         return status;
769 }
770
771 /*
772  *  ======== bridge_chnl_open ========
773  *      Open a new half-duplex channel to the DSP board.
774  */
int bridge_chnl_open(struct chnl_object **chnl,
			    struct chnl_mgr *hchnl_mgr, s8 chnl_mode,
			    u32 ch_id, const struct chnl_attr *pattrs)
{
	int status = 0;
	struct chnl_mgr *chnl_mgr_obj = hchnl_mgr;
	struct chnl_object *pchnl = NULL;
	struct sync_object *sync_event = NULL;
	/* Ensure DBC requirements: */
	DBC_REQUIRE(chnl != NULL);
	DBC_REQUIRE(pattrs != NULL);
	DBC_REQUIRE(hchnl_mgr != NULL);
	*chnl = NULL;
	/* Validate Args: a channel must be able to queue at least one IO
	 * request (uio_reqs != 0), and the requested slot must be in range
	 * and not already occupied, unless the caller asked us to pick a
	 * free slot with CHNL_PICKFREE. */
	if (pattrs->uio_reqs == 0) {
		status = -EINVAL;
	} else {
		if (!hchnl_mgr) {
			status = -EFAULT;
		} else {
			if (ch_id != CHNL_PICKFREE) {
				if (ch_id >= chnl_mgr_obj->max_channels)
					status = -ECHRNG;
				else if (chnl_mgr_obj->ap_channel[ch_id] !=
					 NULL)
					status = -EALREADY;
			} else {
				/* Check for free channel */
				status =
				    search_free_channel(chnl_mgr_obj, &ch_id);
			}
		}
	}
	if (status)
		goto func_end;

	DBC_ASSERT(ch_id < chnl_mgr_obj->max_channels);
	/* Create channel object: */
	pchnl = kzalloc(sizeof(struct chnl_object), GFP_KERNEL);
	if (!pchnl) {
		status = -ENOMEM;
		goto func_end;
	}
	/* Protect queues from io_dpc: the CANCEL state is set BEFORE the
	 * queues exist so the DPC will not touch a half-built channel; it is
	 * cleared to READY only at the very end, once everything is wired. */
	pchnl->dw_state = CHNL_STATECANCEL;
	/* Allocate initial IOR and IOC queues: the free list is pre-filled
	 * with uio_reqs chirps; request/completion lists start empty. */
	pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
	pchnl->pio_requests = create_chirp_list(0);
	pchnl->pio_completions = create_chirp_list(0);
	pchnl->chnl_packets = pattrs->uio_reqs;
	pchnl->cio_cs = 0;
	pchnl->cio_reqs = 0;
	sync_event = kzalloc(sizeof(struct sync_object), GFP_KERNEL);
	if (sync_event)
		sync_init_event(sync_event);
	else
		status = -ENOMEM;

	if (!status) {
		pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
		if (pchnl->ntfy_obj)
			ntfy_init(pchnl->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		/* All three chirp lists must have been created above;
		 * create_chirp_list() returns NULL on allocation failure. */
		if (pchnl->pio_completions && pchnl->pio_requests &&
		    pchnl->free_packets_list) {
			/* Initialize CHNL object fields: */
			pchnl->chnl_mgr_obj = chnl_mgr_obj;
			pchnl->chnl_id = ch_id;
			pchnl->chnl_mode = chnl_mode;
			/* The same sync object serves as both the user-visible
			 * event and the internal IO-completion event. */
			pchnl->user_event = sync_event;
			pchnl->sync_event = sync_event;
			/* Get the process handle */
			pchnl->process = current->tgid;
			pchnl->pcb_arg = 0;
			pchnl->bytes_moved = 0;
			/* Default to proc-copy */
			pchnl->chnl_type = CHNL_PCPY;
		} else {
			status = -ENOMEM;
		}
	}

	if (status) {
		/* Free memory: roll back whichever of the partial
		 * allocations above succeeded before the failure. */
		if (pchnl->pio_completions) {
			free_chirp_list(pchnl->pio_completions);
			pchnl->pio_completions = NULL;
			pchnl->cio_cs = 0;
		}
		if (pchnl->pio_requests) {
			free_chirp_list(pchnl->pio_requests);
			pchnl->pio_requests = NULL;
		}
		if (pchnl->free_packets_list) {
			free_chirp_list(pchnl->free_packets_list);
			pchnl->free_packets_list = NULL;
		}
		kfree(sync_event);
		sync_event = NULL;

		if (pchnl->ntfy_obj) {
			ntfy_delete(pchnl->ntfy_obj);
			kfree(pchnl->ntfy_obj);
			pchnl->ntfy_obj = NULL;
		}
		kfree(pchnl);
	} else {
		/* Insert channel object in channel manager: */
		chnl_mgr_obj->ap_channel[pchnl->chnl_id] = pchnl;
		/* open_channels is shared with the DPC path, hence the
		 * bottom-half-safe spinlock around the increment. */
		spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		chnl_mgr_obj->open_channels++;
		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
		/* Return result... */
		pchnl->dw_state = CHNL_STATEREADY;
		*chnl = pchnl;
	}
func_end:
	DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
	return status;
}
900
901 /*
902  *  ======== bridge_chnl_register_notify ========
903  *      Registers for events on a particular channel.
904  */
905 int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
906                                     u32 event_mask, u32 notify_type,
907                                     struct dsp_notification *hnotification)
908 {
909         int status = 0;
910
911         DBC_ASSERT(!(event_mask & ~(DSP_STREAMDONE | DSP_STREAMIOCOMPLETION)));
912
913         if (event_mask)
914                 status = ntfy_register(chnl_obj->ntfy_obj, hnotification,
915                                                 event_mask, notify_type);
916         else
917                 status = ntfy_unregister(chnl_obj->ntfy_obj, hnotification);
918
919         return status;
920 }
921
922 /*
923  *  ======== create_chirp_list ========
924  *  Purpose:
925  *      Initialize a queue of channel I/O Request/Completion packets.
926  *  Parameters:
927  *      chirps:     Number of Chirps to allocate.
928  *  Returns:
929  *      Pointer to queue of IRPs, or NULL.
930  *  Requires:
931  *  Ensures:
932  */
933 static struct lst_list *create_chirp_list(u32 chirps)
934 {
935         struct lst_list *chirp_list;
936         struct chnl_irp *chnl_packet_obj;
937         u32 i;
938
939         chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
940
941         if (chirp_list) {
942                 INIT_LIST_HEAD(&chirp_list->head);
943                 /* Make N chirps and place on queue. */
944                 for (i = 0; (i < chirps)
945                      && ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
946                         lst_put_tail(chirp_list,
947                                      (struct list_head *)chnl_packet_obj);
948                 }
949
950                 /* If we couldn't allocate all chirps, free those allocated: */
951                 if (i != chirps) {
952                         free_chirp_list(chirp_list);
953                         chirp_list = NULL;
954                 }
955         }
956
957         return chirp_list;
958 }
959
960 /*
961  *  ======== free_chirp_list ========
962  *  Purpose:
963  *      Free the queue of Chirps.
964  */
965 static void free_chirp_list(struct lst_list *chirp_list)
966 {
967         DBC_REQUIRE(chirp_list != NULL);
968
969         while (!LST_IS_EMPTY(chirp_list))
970                 kfree(lst_get_head(chirp_list));
971
972         kfree(chirp_list);
973 }
974
975 /*
976  *  ======== make_new_chirp ========
977  *      Allocate the memory for a new channel IRP.
978  */
979 static struct chnl_irp *make_new_chirp(void)
980 {
981         struct chnl_irp *chnl_packet_obj;
982
983         chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
984         if (chnl_packet_obj != NULL) {
985                 /* lst_init_elem only resets the list's member values. */
986                 lst_init_elem(&chnl_packet_obj->link);
987         }
988
989         return chnl_packet_obj;
990 }
991
992 /*
993  *  ======== search_free_channel ========
994  *      Search for a free channel slot in the array of channel pointers.
995  */
996 static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
997                                       u32 *chnl)
998 {
999         int status = -ENOSR;
1000         u32 i;
1001
1002         DBC_REQUIRE(chnl_mgr_obj);
1003
1004         for (i = 0; i < chnl_mgr_obj->max_channels; i++) {
1005                 if (chnl_mgr_obj->ap_channel[i] == NULL) {
1006                         status = 0;
1007                         *chnl = i;
1008                         break;
1009                 }
1010         }
1011
1012         return status;
1013 }