1 /* Cypress West Bridge API source file (cyaslowlevel.c)
2 ## ===========================
3 ## Copyright (C) 2010 Cypress Semiconductor
5 ## This program is free software; you can redistribute it and/or
6 ## modify it under the terms of the GNU General Public License
7 ## as published by the Free Software Foundation; either version 2
8 ## of the License, or (at your option) any later version.
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
15 ## You should have received a copy of the GNU General Public License
16 ## along with this program; if not, write to the Free Software
17 ## Foundation, Inc., 51 Franklin Street, Fifth Floor
18 ## Boston, MA 02110-1301, USA.
19 ## ===========================
22 #include "../../include/linux/westbridge/cyashal.h"
23 #include "../../include/linux/westbridge/cyascast.h"
24 #include "../../include/linux/westbridge/cyasdevice.h"
25 #include "../../include/linux/westbridge/cyaslowlevel.h"
26 #include "../../include/linux/westbridge/cyasintr.h"
27 #include "../../include/linux/westbridge/cyaserr.h"
28 #include "../../include/linux/westbridge/cyasregs.h"
/* Spin-poll budget used while waiting for the mailbox registers to become
 * free; exhausting it is reported as CY_AS_ERROR_TIMEOUT by cy_as_send_one. */
30 static const uint32_t cy_as_low_level_timeout_count = 65536 * 4;
32 /* Forward declaration */
33 static cy_as_return_status_t cy_as_send_one(cy_as_device *dev_p,
34 cy_as_ll_request_response *req_p);
/* NOTE(review): the comment opener above this text and the closing brace of
 * the array below appear to have been lost in extraction; verify against the
 * upstream cyaslowlevel.c before building. */
37  * This array holds the size of the largest request we will ever receive from
38  * the West Bridge device per context. The size is in 16 bit words. Note a
39  * size of 0xffff indicates that there will be no requests on this context
42 static uint16_t max_request_length[CY_RQT_CONTEXT_COUNT] = {
43 8, /* CY_RQT_GENERAL_RQT_CONTEXT - CY_RQT_INITIALIZATION_COMPLETE */
44 8, /* CY_RQT_RESOURCE_RQT_CONTEXT - none */
45 8, /* CY_RQT_STORAGE_RQT_CONTEXT - CY_RQT_MEDIA_CHANGED */
46 128, /* CY_RQT_USB_RQT_CONTEXT - CY_RQT_USB_EVENT */
47 8 /* CY_RQT_TUR_RQT_CONTEXT - CY_RQT_TURBO_CMD_FROM_HOST */
51  * For the given context, this function removes the request node at the head
52  * of the queue from the context. This is called after all processing has
53  * occurred on the given request and response and we are ready to remove this
54  * entry from the queue.
/* NOTE(review): the return type, opening brace, and the declarations of
 * the local "mask"/"state" interrupt-state variables are missing from this
 * extraction — confirm against the upstream file. */
57 cy_as_ll_remove_request_queue_head(cy_as_device *dev_p, cy_as_context *ctxt_p)
60 cy_as_ll_request_list_node *node_p;
63 cy_as_hal_assert(ctxt_p->request_queue_p != 0);
/* Unlink the head node with interrupts masked so the mailbox ISR cannot
 * observe a half-updated queue. */
65 mask = cy_as_hal_disable_interrupts();
66 node_p = ctxt_p->request_queue_p;
67 ctxt_p->request_queue_p = node_p->next;
68 cy_as_hal_enable_interrupts(mask);
75  * note that the caller allocates and destroys the request and
76  * response. generally the destroy happens in the callback for
77  * async requests and after the wait returns for sync. the
78  * request and response may not actually be destroyed but may be
79  * managed in other ways as well. it is the responsibility of
80  * the caller to deal with these in any case. the caller can do
81  * this in the request/response callback function.
/* Only the list node itself is freed here; rqt/resp belong to the caller. */
83 state = cy_as_hal_disable_interrupts();
84 cy_as_hal_c_b_free(node_p);
85 cy_as_hal_enable_interrupts(state);
89  * For the context given, this function sends the next request to
90  * West Bridge via the mailbox register, if the next request is
91  * ready to be sent and has not already been sent.
/* NOTE(review): the return type, opening brace, the surrounding
 * "while (ret == ret)" loop construct, and several braces/break statements
 * are missing from this extraction — verify against upstream. */
94 cy_as_ll_send_next_request(cy_as_device *dev_p, cy_as_context *ctxt_p)
96 cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
99  * ret == ret is equivalent to while (1) but eliminates compiler
100  * warnings for some compilers.
103 cy_as_ll_request_list_node *node_p = ctxt_p->request_queue_p;
/* Only a node still in the QUEUED state may be sent; anything else is
 * already in flight or finished. */
107 if (cy_as_request_get_node_state(node_p) !=
108 CY_AS_REQUEST_LIST_STATE_QUEUED)
111 cy_as_request_set_node_state(node_p,
112 CY_AS_REQUEST_LIST_STATE_WAITING);
113 ret = cy_as_send_one(dev_p, node_p->rqt);
114 if (ret == CY_AS_ERROR_SUCCESS)
118  * if an error occurs in sending the request, tell the requester
119  * about the error and remove the request from the queue.
121 cy_as_request_set_node_state(node_p,
122 CY_AS_REQUEST_LIST_STATE_RECEIVED);
123 node_p->callback(dev_p, ctxt_p->number,
124 node_p->rqt, node_p->resp, ret);
125 cy_as_ll_remove_request_queue_head(dev_p, ctxt_p);
128  * this falls through to the while loop to send the next request
129  * since the previous request did not get sent.
135  * This method removes an entry from the request queue of a given context.
136  * The entry is removed only if it is not in transit (unless "force" is set).
138 cy_as_remove_request_result_t
139 cy_as_ll_remove_request(cy_as_device *dev_p, cy_as_context *ctxt_p,
140 cy_as_ll_request_response *req_p, cy_bool force)
/* NOTE(review): declarations of the "imask"/"state" locals and several
 * braces/else lines are missing from this extraction — verify upstream. */
143 cy_as_ll_request_list_node *node_p;
144 cy_as_ll_request_list_node *tmp_p;
147 imask = cy_as_hal_disable_interrupts();
/* Case 1: the request is at the head of the queue. */
148 if (ctxt_p->request_queue_p != 0 &&
149 ctxt_p->request_queue_p->rqt == req_p) {
150 node_p = ctxt_p->request_queue_p;
/* A head node in the WAITING state is being transmitted; refuse to
 * remove it unless the caller forces removal. */
151 if ((cy_as_request_get_node_state(node_p) ==
152 CY_AS_REQUEST_LIST_STATE_WAITING) && (!force)) {
153 cy_as_hal_enable_interrupts(imask);
154 return cy_as_remove_request_in_transit;
157 ctxt_p->request_queue_p = node_p->next;
/* Case 2: walk the list looking for the node whose successor holds req_p. */
159 tmp_p = ctxt_p->request_queue_p;
160 while (tmp_p != 0 && tmp_p->next != 0 &&
161 tmp_p->next->rqt != req_p)
164 if (tmp_p == 0 || tmp_p->next == 0) {
165 cy_as_hal_enable_interrupts(imask);
166 return cy_as_remove_request_not_found;
169 node_p = tmp_p->next;
170 tmp_p->next = node_p->next;
/* Notify the requester that its request was canceled, then free the node. */
173 if (node_p->callback)
174 node_p->callback(dev_p, ctxt_p->number, node_p->rqt,
175 node_p->resp, CY_AS_ERROR_CANCELED);
177 state = cy_as_hal_disable_interrupts();
178 cy_as_hal_c_b_free(node_p);
179 cy_as_hal_enable_interrupts(state);
181 cy_as_hal_enable_interrupts(imask);
182 return cy_as_remove_request_sucessful;
/* Cancel every request still queued on the given context.
 * NOTE(review): the return type, braces, and the loop construct iterating
 * over the queue are missing from this extraction — verify upstream. */
186 cy_as_ll_remove_all_requests(cy_as_device *dev_p, cy_as_context *ctxt_p)
188 cy_as_ll_request_list_node *node = ctxt_p->request_queue_p;
/* A node already in the RECEIVED state has completed; skip removing it. */
191 if (cy_as_request_get_node_state(ctxt_p->request_queue_p) !=
192 CY_AS_REQUEST_LIST_STATE_RECEIVED)
193 cy_as_ll_remove_request(dev_p, ctxt_p,
/* Return whether the given request is still present in the context's queue.
 * NOTE(review): the return type, "mask" declaration, loop header, and the
 * return statements are missing from this extraction — verify upstream. */
200 cy_as_ll_is_in_queue(cy_as_context *ctxt_p, cy_as_ll_request_response *req_p)
203 cy_as_ll_request_list_node *node_p;
/* Walk the queue with interrupts masked so the ISR cannot mutate it. */
205 mask = cy_as_hal_disable_interrupts();
206 node_p = ctxt_p->request_queue_p;
208 if (node_p->rqt == req_p) {
209 cy_as_hal_enable_interrupts(mask);
212 node_p = node_p->next;
214 cy_as_hal_enable_interrupts(mask);
219  * This is the handler for mailbox data when we are trying to send data
220  * to the West Bridge firmware. The firmware may be trying to send us
221  * data and we need to queue this data to allow the firmware to move
222  * forward and be in a state to receive our request. Here we just queue
223  * the data and it is processed at a later time by the mailbox interrupt
/* NOTE(review): the return type, braces, and declarations of the locals
 * "i", "context" and "data[4]" are missing from this extraction. */
227 cy_as_ll_queue_mailbox_data(cy_as_device *dev_p)
229 cy_as_context *ctxt_p;
234 /* Read the data from mailbox 0 to determine what to do with the data */
/* Mailboxes are read high-to-low; reading MAILBOX0 last clears the
 * pending data, presumably — TODO confirm against the HAL docs. */
235 for (i = 3; i >= 0; i--)
236 data[i] = cy_as_hal_read_register(dev_p->tag,
237 cy_cast_int2U_int16(CY_AS_MEM_P0_MAILBOX0 + i));
239 context = cy_as_mbox_get_context(data[0]);
240 if (context >= CY_RQT_CONTEXT_COUNT) {
241 cy_as_hal_print_message("mailbox request/response received "
242 "with invalid context value (%d)\n", context);
246 ctxt_p = dev_p->context[context];
249  * if we have queued too much data, drop future data.
/* The per-context data_queue must have room for one more 4-word burst. */
251 cy_as_hal_assert(ctxt_p->queue_index * sizeof(uint16_t) +
252 sizeof(data) <= sizeof(ctxt_p->data_queue));
254 for (i = 0; i < 4; i++)
255 ctxt_p->data_queue[ctxt_p->queue_index++] = data[i];
257 cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
/* Flag deferred work for cy_as_mail_box_queued_data_handler(). */
258 dev_p->ll_queued_data = cy_true;
/* Process one 4-word burst of queued mailbox data: route it either into the
 * per-context incoming request buffer or into the response of the request at
 * the head of the context's queue, and fire the matching callback when the
 * LAST flag is seen.
 * NOTE(review): the return type, braces, else lines, and declarations of the
 * locals "context", "len_p", "st", "src", "dest" are missing from this
 * extraction — verify against the upstream file. */
262 cy_as_mail_box_process_data(cy_as_device *dev_p, uint16_t *data)
264 cy_as_context *ctxt_p;
267 cy_as_ll_request_response *rec_p;
271 context = cy_as_mbox_get_context(data[0]);
272 if (context >= CY_RQT_CONTEXT_COUNT) {
273 cy_as_hal_print_message("mailbox request/response received "
274 "with invalid context value (%d)\n", context);
278 ctxt_p = dev_p->context[context];
/* Firmware-initiated request: accumulate into the context's req_p buffer. */
280 if (cy_as_mbox_is_request(data[0])) {
281 cy_as_hal_assert(ctxt_p->req_p != 0);
282 rec_p = ctxt_p->req_p;
283 len_p = &ctxt_p->request_length;
/* Response path: a response is only valid if the head request is WAITING. */
286 if (ctxt_p->request_queue_p == 0 ||
287 cy_as_request_get_node_state(ctxt_p->request_queue_p)
288 != CY_AS_REQUEST_LIST_STATE_WAITING) {
289 cy_as_hal_print_message("mailbox response received on "
290 "context that was not expecting a response\n");
291 cy_as_hal_print_message(" context: %d\n", context);
292 cy_as_hal_print_message(" contents: 0x%04x 0x%04x "
294 data[0], data[1], data[2], data[3]);
295 if (ctxt_p->request_queue_p != 0)
296 cy_as_hal_print_message(" state: 0x%02x\n",
297 ctxt_p->request_queue_p->state);
301 /* Make sure the request has an associated response */
302 cy_as_hal_assert(ctxt_p->request_queue_p->resp != 0);
304 rec_p = ctxt_p->request_queue_p->resp;
305 len_p = &ctxt_p->request_queue_p->length;
308 if (rec_p->stored == 0) {
310 * this is the first cycle of the response
312 cy_as_ll_request_response__set_code(rec_p,
313 cy_as_mbox_get_code(data[0]));
314 cy_as_ll_request_response__set_context(rec_p, context);
316 if (cy_as_mbox_is_last(data[0])) {
317 /* This is a single cycle response */
318 *len_p = rec_p->length;
321 /* Ensure that enough memory has been
322 * reserved for the response. */
323 cy_as_hal_assert(rec_p->length >= data[1]);
/* Clamp the advertised multi-cycle length to the buffer size. */
324 *len_p = (data[1] < rec_p->length) ?
325 data[1] : rec_p->length;
331 /* Transfer the data from the mailboxes to the response */
332 while (rec_p->stored < *len_p && st < 4)
333 rec_p->data[rec_p->stored++] = data[st++];
335 if (cy_as_mbox_is_last(data[0])) {
336 /* NB: The call-back that is made below can cause the
337 * addition of more data in this queue, thus causing
338 * a recursive overflow of the queue. this is prevented
339 * by removing the request entry that is currently
340 * being passed up from the data queue. if this is done,
341 * the queue only needs to be as long as two request
342 * entries from west bridge.
/* Compact the data queue: drop the burst just consumed (at rqt_index-4)
 * before invoking callbacks that may enqueue more data. */
344 if ((ctxt_p->rqt_index > 0) &&
345 (ctxt_p->rqt_index <= ctxt_p->queue_index)) {
347 src = ctxt_p->rqt_index;
349 while (src < ctxt_p->queue_index)
350 ctxt_p->data_queue[dest++] =
351 ctxt_p->data_queue[src++];
353 ctxt_p->rqt_index = 0;
354 ctxt_p->queue_index = dest;
355 cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
358 if (ctxt_p->request_queue_p != 0 && rec_p ==
359 ctxt_p->request_queue_p->resp) {
361 * if this is the last cycle of the response, call the
362 * callback and reset for the next response.
364 cy_as_ll_request_response *resp_p =
365 ctxt_p->request_queue_p->resp;
366 resp_p->length = ctxt_p->request_queue_p->length;
367 cy_as_request_set_node_state(ctxt_p->request_queue_p,
368 CY_AS_REQUEST_LIST_STATE_RECEIVED);
/* The in-callback flag lets cy_as_ll_send_request avoid re-entering
 * the queued-data handler. */
370 cy_as_device_set_in_callback(dev_p);
371 ctxt_p->request_queue_p->callback(dev_p, context,
372 ctxt_p->request_queue_p->rqt,
373 resp_p, CY_AS_ERROR_SUCCESS);
375 cy_as_device_clear_in_callback(dev_p);
377 cy_as_ll_remove_request_queue_head(dev_p, ctxt_p);
378 cy_as_ll_send_next_request(dev_p, ctxt_p);
380 /* Send the request to the appropriate
381 * module to handle */
382 cy_as_ll_request_response *request_p = ctxt_p->req_p;
384 if (ctxt_p->request_callback) {
385 cy_as_device_set_in_callback(dev_p);
386 ctxt_p->request_callback(dev_p, context,
387 request_p, 0, CY_AS_ERROR_SUCCESS);
388 cy_as_device_clear_in_callback(dev_p);
/* Recycle the request buffer for the next incoming request. */
390 cy_as_ll_init_request(request_p, 0,
391 context, request_p->length);
392 ctxt_p->req_p = request_p;
398  * This is the handler for processing queued mailbox data
/* Drain every context's data_queue in 4-word bursts, re-iterating as long as
 * new data was queued while we were processing.
 * NOTE(review): the return type, braces, and declarations of the locals "i"
 * and "offset" are missing from this extraction — verify upstream. */
401 cy_as_mail_box_queued_data_handler(cy_as_device *dev_p)
406  * if more data gets queued in between our entering this call
407  * and the end of the iteration on all contexts; we should
408  * continue processing the queued data.
410 while (dev_p->ll_queued_data) {
411 dev_p->ll_queued_data = cy_false;
412 for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
414 cy_as_context *ctxt_p = dev_p->context[i];
415 cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
/* rqt_index marks the end of the burst being processed so that
 * cy_as_mail_box_process_data can compact the queue safely. */
418 while (offset < ctxt_p->queue_index) {
419 ctxt_p->rqt_index = offset + 4;
420 cy_as_mail_box_process_data(dev_p,
421 ctxt_p->data_queue + offset);
422 offset = ctxt_p->rqt_index;
424 ctxt_p->queue_index = 0;
430  * This is the handler for the mailbox interrupt. This function reads
431  * data from the mailbox registers until a complete request or response
432  * is received. When a complete request is received, the callback
433  * associated with requests on that context is called. When a complete
434  * response is received, the callback associated with the request that
435  * generated the response is called.
/* NOTE(review): the return type and braces are missing from this
 * extraction — verify against upstream. */
438 cy_as_mail_box_interrupt_handler(cy_as_device *dev_p)
440 cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
443  * queue the mailbox data to preserve
444  * order for later processing.
446 cy_as_ll_queue_mailbox_data(dev_p);
449  * process what was queued and anything that may be pending
451 cy_as_mail_box_queued_data_handler(dev_p);
/* Start the low-level communications module: allocate and initialize one
 * cy_as_context (with request buffer and sleep channel) per context.
 * Returns CY_AS_ERROR_ALREADY_RUNNING, CY_AS_ERROR_OUT_OF_MEMORY,
 * CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED, or CY_AS_ERROR_SUCCESS.
 * NOTE(review): braces and the declaration of loop variable "i" are missing
 * from this extraction; earlier-allocated contexts are not freed on the
 * error paths — presumably cleaned up by the caller; verify upstream. */
454 cy_as_return_status_t
455 cy_as_ll_start(cy_as_device *dev_p)
459 if (cy_as_device_is_low_level_running(dev_p))
460 return CY_AS_ERROR_ALREADY_RUNNING;
462 dev_p->ll_sending_rqt = cy_false;
463 dev_p->ll_abort_curr_rqt = cy_false;
465 for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
466 dev_p->context[i] = (cy_as_context *)
467 cy_as_hal_alloc(sizeof(cy_as_context));
468 if (dev_p->context[i] == 0)
469 return CY_AS_ERROR_OUT_OF_MEMORY;
471 dev_p->context[i]->number = (uint8_t)i;
472 dev_p->context[i]->request_callback = 0;
473 dev_p->context[i]->request_queue_p = 0;
474 dev_p->context[i]->last_node_p = 0;
/* Pre-allocate the incoming-request buffer sized per max_request_length. */
475 dev_p->context[i]->req_p = cy_as_ll_create_request(dev_p,
476 0, (uint8_t)i, max_request_length[i]);
477 dev_p->context[i]->queue_index = 0;
479 if (!cy_as_hal_create_sleep_channel
480 (&dev_p->context[i]->channel))
481 return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED;
484 cy_as_device_set_low_level_running(dev_p);
485 return CY_AS_ERROR_SUCCESS;
489  * Shutdown the low level communications module. This operation will
490  * also cancel any queued low level requests.
/* NOTE(review): braces, the declarations of "i", "mask", "state", and the
 * final return statement are missing from this extraction — verify
 * against upstream. */
492 cy_as_return_status_t
493 cy_as_ll_stop(cy_as_device *dev_p)
496 cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
497 cy_as_context *ctxt_p;
500 for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
501 ctxt_p = dev_p->context[i];
502 if (!cy_as_hal_destroy_sleep_channel(&ctxt_p->channel))
503 return CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED;
506 * now, free any queued requests and associated responses
508 while (ctxt_p->request_queue_p) {
510 cy_as_ll_request_list_node *node_p =
511 ctxt_p->request_queue_p;
513 /* Mark this pair as in a cancel operation */
514 cy_as_request_set_node_state(node_p,
515 CY_AS_REQUEST_LIST_STATE_CANCELING);
517 /* Tell the caller that we are canceling this request */
518 /* NB: The callback is responsible for destroying the
519 * request and the response. we cannot count on the
520 * contents of these two after calling the callback.
522 node_p->callback(dev_p, i, node_p->rqt,
523 node_p->resp, CY_AS_ERROR_CANCELED);
525 /* Remove the pair from the queue */
526 mask = cy_as_hal_disable_interrupts();
527 ctxt_p->request_queue_p = node_p->next;
528 cy_as_hal_enable_interrupts(mask);
530 /* Free the list node */
531 state = cy_as_hal_disable_interrupts();
532 cy_as_hal_c_b_free(node_p);
533 cy_as_hal_enable_interrupts(state);
/* Release the per-context incoming-request buffer and the context itself. */
536 cy_as_ll_destroy_request(dev_p, dev_p->context[i]->req_p);
537 cy_as_hal_free(dev_p->context[i]);
538 dev_p->context[i] = 0;
541 cy_as_device_set_low_level_stopped(dev_p);
/* Zero a request buffer and stamp it with code, context, length, and the
 * "request" direction flag. "length" is a word count; the buffer layout
 * reserves one data word inside cy_as_ll_request_response, hence (length-1).
 * NOTE(review): return type and braces are missing from this extraction. */
547 cy_as_ll_init_request(cy_as_ll_request_response *req_p,
548 uint16_t code, uint16_t context, uint16_t length)
550 uint16_t totallen = sizeof(cy_as_ll_request_response) +
551 (length - 1) * sizeof(uint16_t);
553 cy_as_hal_mem_set(req_p, 0, totallen);
554 req_p->length = length;
555 cy_as_ll_request_response__set_code(req_p, code);
556 cy_as_ll_request_response__set_context(req_p, context);
557 cy_as_ll_request_response__set_request(req_p);
561  * Create a new request.
/* Allocate (via the callback-safe allocator) and initialize a request of
 * "length" data words. NOTE(review): braces, the declaration of "state",
 * the allocation-failure check, and the return statement are missing from
 * this extraction — verify upstream. */
563 cy_as_ll_request_response *
564 cy_as_ll_create_request(cy_as_device *dev_p, uint16_t code,
565 uint8_t context, uint16_t length)
567 cy_as_ll_request_response *req_p;
569 uint16_t totallen = sizeof(cy_as_ll_request_response) +
570 (length - 1) * sizeof(uint16_t);
574 state = cy_as_hal_disable_interrupts();
575 req_p = cy_as_hal_c_b_alloc(totallen);
576 cy_as_hal_enable_interrupts(state);
578 cy_as_ll_init_request(req_p, code, context, length);
/* Free a request previously created with cy_as_ll_create_request.
 * NOTE(review): return type, braces, and the "state" declaration are
 * missing from this extraction. */
587 cy_as_ll_destroy_request(cy_as_device *dev_p, cy_as_ll_request_response *req_p)
593 state = cy_as_hal_disable_interrupts();
594 cy_as_hal_c_b_free(req_p);
595 cy_as_hal_enable_interrupts(state);
/* Zero a response buffer, record its word length, and stamp the "response"
 * direction flag. NOTE(review): return type and braces are missing from
 * this extraction. */
600 cy_as_ll_init_response(cy_as_ll_request_response *req_p, uint16_t length)
602 uint16_t totallen = sizeof(cy_as_ll_request_response) +
603 (length - 1) * sizeof(uint16_t);
605 cy_as_hal_mem_set(req_p, 0, totallen);
606 req_p->length = length;
607 cy_as_ll_request_response__set_response(req_p);
611  * Create a new response
/* Allocate and initialize a response of "length" data words.
 * NOTE(review): braces, the "state" declaration, the allocation-failure
 * check, and the return statement are missing from this extraction. */
613 cy_as_ll_request_response *
614 cy_as_ll_create_response(cy_as_device *dev_p, uint16_t length)
616 cy_as_ll_request_response *req_p;
618 uint16_t totallen = sizeof(cy_as_ll_request_response) +
619 (length - 1) * sizeof(uint16_t);
623 state = cy_as_hal_disable_interrupts();
624 req_p = cy_as_hal_c_b_alloc(totallen);
625 cy_as_hal_enable_interrupts(state);
627 cy_as_ll_init_response(req_p, length);
633  * Destroy the new response
/* Free a response previously created with cy_as_ll_create_response.
 * NOTE(review): return type, braces, and the "state" declaration are
 * missing from this extraction. */
636 cy_as_ll_destroy_response(cy_as_device *dev_p, cy_as_ll_request_response *req_p)
642 state = cy_as_hal_disable_interrupts();
643 cy_as_hal_c_b_free(req_p);
644 cy_as_hal_enable_interrupts(state);
/* Drain any pending incoming mailbox data, then return the MCU mailbox
 * "not read" status so the caller knows whether the outgoing mailboxes are
 * free. NOTE(review): the return type, the dev_p parameter line, braces,
 * the "mask" declaration, and the loop-exit/return logic are missing from
 * this extraction; "while (last == last)" reads as an intentional
 * always-true loop like "ret == ret" above — verify upstream. */
648 cy_as_read_intr_status(
652 cy_bool bloop = cy_true;
653 uint16_t v = 0, last = 0xffff;
656  * before determining if the mailboxes are ready for more data,
657  * we first check the mailbox interrupt to see if we need to
658  * receive data. this prevents a dead-lock condition that can
659  * occur when both sides are trying to receive data.
661 while (last == last) {
663 * disable interrupts to be sure we don't process the mailbox
664 * here and have the interrupt routine try to read this data
667 mask = cy_as_hal_disable_interrupts();
670 * see if there is data to be read.
672 v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);
673 if ((v & CY_AS_MEM_P0_INTR_REG_MBINT) == 0) {
674 cy_as_hal_enable_interrupts(mask);
679 * queue the mailbox data for later processing.
680 * this allows the firmware to move forward and
681 * service the request from the P port.
683 cy_as_ll_queue_mailbox_data(dev_p);
686 * enable interrupts again to service mailbox
687 * interrupts appropriately
689 cy_as_hal_enable_interrupts(mask);
693 * now, all data is received
695 last = cy_as_hal_read_register(dev_p->tag,
696 CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD;
698 v = cy_as_hal_read_register(dev_p->tag,
699 CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD;
710  * Send a single request or response using the mail box register.
711  * This function does not deal with the internal queues at all,
712  * but only sends the request or response across to the firmware
/* Transmission protocol visible here: payloads longer than 3 words are
 * streamed in 4-word bursts (MAILBOX1..3 carry data, MAILBOX0 carries the
 * header, with the LAST flag set on the final burst); short payloads go out
 * in a single write with the length in MAILBOX1. The in-flight flags
 * ll_sending_rqt/ll_abort_curr_rqt support cancellation from
 * cy_as_ll_remove_ep_data_requests.
 * NOTE(review): the dev_p parameter line, braces, else lines, and the
 * declarations of locals such as "i", "st", "which", "mb0", "v",
 * "loopcount", "int_stat" are missing from this extraction — verify
 * against the upstream file before relying on exact control flow. */
714 static cy_as_return_status_t
717 cy_as_ll_request_response *req_p)
/* Sanity-check that the payload fits the per-context maximum. */
725 if (cy_as_ll_request_response__is_request(req_p)) {
726 switch (cy_as_ll_request_response__get_context(req_p)) {
727 case CY_RQT_GENERAL_RQT_CONTEXT:
728 cy_as_hal_assert(req_p->length * 2 + 2 <
729 CY_CTX_GEN_MAX_DATA_SIZE);
732 case CY_RQT_RESOURCE_RQT_CONTEXT:
733 cy_as_hal_assert(req_p->length * 2 + 2 <
734 CY_CTX_RES_MAX_DATA_SIZE);
737 case CY_RQT_STORAGE_RQT_CONTEXT:
738 cy_as_hal_assert(req_p->length * 2 + 2 <
739 CY_CTX_STR_MAX_DATA_SIZE);
742 case CY_RQT_USB_RQT_CONTEXT:
743 cy_as_hal_assert(req_p->length * 2 + 2 <
744 CY_CTX_USB_MAX_DATA_SIZE);
750 /* Write the request to the mail box registers */
751 if (req_p->length > 3) {
752 uint16_t length = req_p->length;
756 dev_p->ll_sending_rqt = cy_true;
757 while (which < length) {
/* Busy-wait until the firmware has consumed the previous burst. */
758 loopcount = cy_as_low_level_timeout_count;
760 v = cy_as_read_intr_status(dev_p);
762 } while (v && loopcount-- > 0);
765 cy_as_hal_print_message(
766 ">>>>>> LOW LEVEL TIMEOUT "
768 cy_as_hal_read_register(dev_p->tag,
769 CY_AS_MEM_MCU_MAILBOX0),
770 cy_as_hal_read_register(dev_p->tag,
771 CY_AS_MEM_MCU_MAILBOX1),
772 cy_as_hal_read_register(dev_p->tag,
773 CY_AS_MEM_MCU_MAILBOX2),
774 cy_as_hal_read_register(dev_p->tag,
775 CY_AS_MEM_MCU_MAILBOX3));
776 return CY_AS_ERROR_TIMEOUT;
/* Honor a cancellation requested while we were waiting. */
779 if (dev_p->ll_abort_curr_rqt) {
780 dev_p->ll_sending_rqt = cy_false;
781 dev_p->ll_abort_curr_rqt = cy_false;
782 return CY_AS_ERROR_CANCELED;
785 int_stat = cy_as_hal_disable_interrupts();
788 * check again whether the mailbox is free.
789 * it is possible that an ISR came in and
790 * wrote into the mailboxes since we last
791 * checked the status.
793 v = cy_as_hal_read_register(dev_p->tag,
794 CY_AS_MEM_MCU_MB_STAT) &
795 CY_AS_MEM_P0_MCU_MBNOTRD;
797 /* Go back to the original check since
798 * the mailbox is not free. */
799 cy_as_hal_enable_interrupts(int_stat);
/* First burst carries the total length in MAILBOX1 — presumably; the
 * surrounding condition is missing from this extraction. */
804 cy_as_hal_write_register(dev_p->tag,
805 CY_AS_MEM_MCU_MAILBOX1, length);
811 while ((which < length) && (st < 4)) {
812 cy_as_hal_write_register(dev_p->tag,
814 (CY_AS_MEM_MCU_MAILBOX0 + st),
815 req_p->data[which++]);
820 if (which == length) {
821 dev_p->ll_sending_rqt = cy_false;
822 mb0 |= CY_AS_REQUEST_RESPONSE_LAST_MASK;
825 if (dev_p->ll_abort_curr_rqt) {
826 dev_p->ll_sending_rqt = cy_false;
827 dev_p->ll_abort_curr_rqt = cy_false;
828 cy_as_hal_enable_interrupts(int_stat);
829 return CY_AS_ERROR_CANCELED;
/* Writing MAILBOX0 last raises the interrupt on the firmware side. */
832 cy_as_hal_write_register(dev_p->tag,
833 CY_AS_MEM_MCU_MAILBOX0, mb0);
835 /* Wait for the MBOX interrupt to be high */
836 cy_as_hal_sleep150();
837 cy_as_hal_enable_interrupts(int_stat);
/* Short (<= 3 word) transfers: single burst, single label-based retry. */
840 check_mailbox_availability:
842 * wait for the mailbox registers to become available. this
843 * should be a very quick wait as the firmware is designed
844 * to accept requests at interrupt time and queue them for
847 loopcount = cy_as_low_level_timeout_count;
849 v = cy_as_read_intr_status(dev_p);
851 } while (v && loopcount-- > 0);
854 cy_as_hal_print_message(
855 ">>>>>> LOW LEVEL TIMEOUT %x %x %x %x\n",
856 cy_as_hal_read_register(dev_p->tag,
857 CY_AS_MEM_MCU_MAILBOX0),
858 cy_as_hal_read_register(dev_p->tag,
859 CY_AS_MEM_MCU_MAILBOX1),
860 cy_as_hal_read_register(dev_p->tag,
861 CY_AS_MEM_MCU_MAILBOX2),
862 cy_as_hal_read_register(dev_p->tag,
863 CY_AS_MEM_MCU_MAILBOX3));
864 return CY_AS_ERROR_TIMEOUT;
867 int_stat = cy_as_hal_disable_interrupts();
870 * check again whether the mailbox is free. it is
871 * possible that an ISR came in and wrote into the
872 * mailboxes since we last checked the status.
874 v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) &
875 CY_AS_MEM_P0_MCU_MBNOTRD;
877 /* Go back to the original check
878 * since the mailbox is not free. */
879 cy_as_hal_enable_interrupts(int_stat);
880 goto check_mailbox_availability;
883 /* Write the data associated with the request
884 * into the mbox registers 1 - 3 */
886 for (i = req_p->length - 1; i >= 0; i--)
887 cy_as_hal_write_register(dev_p->tag,
888 cy_cast_int2U_int16(CY_AS_MEM_MCU_MAILBOX1 + i),
891 /* Write the mbox register 0 to trigger the interrupt */
892 cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0,
893 req_p->box0 | CY_AS_REQUEST_RESPONSE_LAST_MASK);
895 cy_as_hal_sleep150();
896 cy_as_hal_enable_interrupts(int_stat);
899 return CY_AS_ERROR_SUCCESS;
903  * This function queues a single request to be sent to the firmware.
/* Appends a request/response pair plus callback to the context's queue and
 * kicks cy_as_ll_send_next_request. When not called from within a mailbox
 * callback, it also drains any already-queued mailbox data.
 * NOTE(review): the dev_p parameter line, the "sync" parameter line,
 * braces, the "context" declaration, the node_p null check condition, the
 * rqt/resp/next field assignments, and the final return are missing from
 * this extraction — verify against upstream. */
905 extern cy_as_return_status_t
906 cy_as_ll_send_request(
908 /* The request to send */
909 cy_as_ll_request_response *req,
910 /* Storage for a reply, must be sure
911 * it is of sufficient size */
912 cy_as_ll_request_response *resp,
913 /* If true, this is a synchronous request */
915 /* Callback to call when reply is received */
916 cy_as_response_callback cb
919 cy_as_context *ctxt_p;
920 uint16_t box0 = req->box0;
922 cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
923 cy_as_ll_request_list_node *node_p;
924 uint32_t mask, state;
926 cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
928 context = cy_as_mbox_get_context(box0);
929 cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
930 ctxt_p = dev_p->context[context];
932 /* Allocate the list node */
933 state = cy_as_hal_disable_interrupts();
934 node_p = cy_as_hal_c_b_alloc(sizeof(cy_as_ll_request_list_node));
935 cy_as_hal_enable_interrupts(state);
938 return CY_AS_ERROR_OUT_OF_MEMORY;
940 /* Initialize the list node */
941 node_p->callback = cb;
946 node_p->state = CY_AS_REQUEST_LIST_STATE_QUEUED;
948 cy_as_request_node_set_sync(node_p);
950 /* Put the request into the queue */
951 mask = cy_as_hal_disable_interrupts();
952 if (ctxt_p->request_queue_p == 0) {
954 ctxt_p->request_queue_p = node_p;
955 ctxt_p->last_node_p = node_p;
957 ctxt_p->last_node_p->next = node_p;
958 ctxt_p->last_node_p = node_p;
960 cy_as_hal_enable_interrupts(mask);
961 cy_as_ll_send_next_request(dev_p, ctxt_p);
/* Avoid re-entering the queued-data handler from inside a callback. */
963 if (!cy_as_device_is_in_callback(dev_p)) {
964 mask = cy_as_hal_disable_interrupts();
965 cy_as_mail_box_queued_data_handler(dev_p);
966 cy_as_hal_enable_interrupts(mask);
/* Completion callback used by cy_as_ll_send_request_wait_reply: stash the
 * result in dev_p->ll_error and wake the sleeping caller.
 * NOTE(review): the return type, dev_p/context parameter lines, and braces
 * are missing from this extraction — verify upstream. */
973 cy_as_ll_send_callback(
976 cy_as_ll_request_response *rqt,
977 cy_as_ll_request_response *resp,
978 cy_as_return_status_t ret)
985 cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
988 * store the state to return to the caller
990 dev_p->ll_error = ret;
993 * now wake the caller
995 cy_as_hal_wake(&dev_p->context[context]->channel);
/* Synchronous wrapper: queue a request and sleep until the reply arrives or
 * the 8-second (800 x 10 ms) timeout expires, then return the result stored
 * by cy_as_ll_send_callback.
 * NOTE(review): braces, the "context" declaration, and the early return on
 * send failure are missing from this extraction — verify upstream. */
998 cy_as_return_status_t
999 cy_as_ll_send_request_wait_reply(
1000 cy_as_device *dev_p,
1001 /* The request to send */
1002 cy_as_ll_request_response *req,
1003 /* Storage for a reply, must be
1004 * sure it is of sufficient size */
1005 cy_as_ll_request_response *resp
1008 cy_as_return_status_t ret;
1010 /* Larger 8 sec time-out to handle the init
1011 * delay for slower storage devices in USB FS. */
1012 uint32_t loopcount = 800;
1013 cy_as_context *ctxt_p;
1015 /* Get the context for the request */
1016 context = cy_as_ll_request_response__get_context(req);
1017 cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
1018 ctxt_p = dev_p->context[context];
1020 ret = cy_as_ll_send_request(dev_p, req, resp,
1021 cy_true, cy_as_ll_send_callback);
1022 if (ret != CY_AS_ERROR_SUCCESS)
1025 while (loopcount-- > 0) {
1027 * sleep while we wait on the response. receiving the reply will
1028 * wake this thread. we will wait, at most 2 seconds (10 ms*200
1029 * tries) before we timeout. note if the reply arrives, we will
1030 * not sleep the entire 10 ms, just til the reply arrives.
1032 cy_as_hal_sleep_on(&ctxt_p->channel, 10);
1035 * if the request has left the queue, it means the request has
1036 * been sent and the reply has been received. this means we can
1037 * return to the caller and be sure the reply has been received.
1039 if (!cy_as_ll_is_in_queue(ctxt_p, req))
1040 return dev_p->ll_error;
1043 /* Remove the QueueListNode for this request. */
1044 cy_as_ll_remove_request(dev_p, ctxt_p, req, cy_true);
1046 return CY_AS_ERROR_TIMEOUT;
/* Register the handler invoked when a firmware-initiated request arrives on
 * the given context. NOTE(review): the "context" parameter line and braces
 * are missing from this extraction — verify upstream. */
1049 cy_as_return_status_t
1050 cy_as_ll_register_request_callback(
1051 cy_as_device *dev_p,
1053 cy_as_response_callback cb)
1055 cy_as_context *ctxt_p;
1056 cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
1057 ctxt_p = dev_p->context[context];
1059 ctxt_p->request_callback = cb;
1060 return CY_AS_ERROR_SUCCESS;
/* Pack a byte buffer into the request/response word array, big-endian within
 * each 16-bit word (first byte goes to the high half); an odd trailing byte
 * fills only the high half of the final word.
 * NOTE(review): the return type, the offset/length/data_p parameter lines,
 * braces, the "dt" declaration, and the loop bookkeeping (length/offset
 * updates, low-byte OR) are missing from this extraction. */
1064 cy_as_ll_request_response__pack(
1065 cy_as_ll_request_response *req_p,
1071 uint8_t *dp = (uint8_t *)data_p;
1073 while (length > 1) {
1074 dt = ((*dp++) << 8);
1076 cy_as_ll_request_response__set_word(req_p, offset, dt);
1083 cy_as_ll_request_response__set_word(req_p, offset, dt);
/* Unpack words from the request/response into a byte buffer: high byte
 * first, then low byte — the inverse of __pack above.
 * NOTE(review): the return type, the offset/length/data_p parameter lines,
 * braces, and the odd-length handling around the final low byte are
 * missing from this extraction. */
1088 cy_as_ll_request_response__unpack(
1089 cy_as_ll_request_response *req_p,
1094 uint8_t *dp = (uint8_t *)data_p;
1096 while (length-- > 0) {
1097 uint16_t val = cy_as_ll_request_response__get_word
1099 *dp++ = (uint8_t)((val >> 8) & 0xff);
1103 *dp++ = (uint8_t)(val & 0xff);
/* Send a one-word CY_RESP_SUCCESS_FAILURE status response (code in word 0)
 * directly via cy_as_send_one, bypassing the request queue. The stack
 * buffer is safe because cy_as_send_one transmits synchronously.
 * NOTE(review): the context/code parameter lines, braces, the length/word-0
 * setup for "resp", the clear_storage condition, and the final return are
 * missing from this extraction — verify upstream. */
1108 extern cy_as_return_status_t
1109 cy_as_ll_send_status_response(
1110 cy_as_device *dev_p,
1113 uint8_t clear_storage)
1115 cy_as_return_status_t ret;
1116 cy_as_ll_request_response resp;
1117 cy_as_ll_request_response *resp_p = &resp;
1119 cy_as_hal_mem_set(resp_p, 0, sizeof(resp));
1121 cy_as_ll_request_response__set_response(resp_p);
1122 cy_as_ll_request_response__set_context(resp_p, context);
1125 cy_as_ll_request_response__set_clear_storage_flag(resp_p);
1127 cy_as_ll_request_response__set_code(resp_p, CY_RESP_SUCCESS_FAILURE);
1128 cy_as_ll_request_response__set_word(resp_p, 0, code);
1130 ret = cy_as_send_one(dev_p, resp_p);
/* Send a data-carrying response: word 0 holds the byte length, followed by
 * the packed payload, built in a 256-byte stack buffer and transmitted via
 * cy_as_send_one. NOTE(review): the context/code/length/data parameter
 * lines, braces, the "wlen" declaration and its computation (byte count to
 * word count, rounding up, +1 for the length field), and the size-check
 * condition are missing from this extraction — verify upstream. */
1135 extern cy_as_return_status_t
1136 cy_as_ll_send_data_response(
1137 cy_as_device *dev_p,
1143 cy_as_ll_request_response *resp_p;
1145 uint8_t respbuf[256];
1148 return CY_AS_ERROR_INVALID_SIZE;
1150 /* Word length for bytes */
1153 /* If byte length odd, add one more */
1157 /* One for the length of field */
1160 resp_p = (cy_as_ll_request_response *)respbuf;
1161 cy_as_hal_mem_set(resp_p, 0, sizeof(respbuf));
1162 resp_p->length = wlen;
1163 cy_as_ll_request_response__set_context(resp_p, context);
1164 cy_as_ll_request_response__set_code(resp_p, code);
1166 cy_as_ll_request_response__set_word(resp_p, 0, length);
1167 cy_as_ll_request_response__pack(resp_p, 1, length, data);
1169 return cy_as_send_one(dev_p, resp_p);
/* Decide whether a queued USB-context request refers to data transfer on
 * the given endpoint (including pending setup-ACKs for EP0). Word 0 bit 13
 * carries the endpoint selector compared here — presumably EP0 vs EP1;
 * TODO confirm the bit layout against the request definitions.
 * NOTE(review): the return type, braces, the "v" declaration, and the
 * cy_true/cy_false return statements are missing from this extraction. */
1173 cy_as_ll_is_e_p_transfer_related_request(cy_as_ll_request_response *rqt_p,
1174 cy_as_end_point_number_t ep)
1177 uint8_t type = cy_as_ll_request_response__get_code(rqt_p);
1179 if (cy_as_ll_request_response__get_context(rqt_p) !=
1180 CY_RQT_USB_RQT_CONTEXT)
1184 * when cancelling outstanding EP0 data transfers, any pending
1185 * setup ACK requests also need to be cancelled.
1187 if ((ep == 0) && (type == CY_RQT_ACK_SETUP_PACKET))
1190 if (type != CY_RQT_USB_EP_DATA)
1193 v = cy_as_ll_request_response__get_word(rqt_p, 0);
1194 if ((cy_as_end_point_number_t)((v >> 13) & 1) != ep)
/* Cancel all queued — and, where possible, in-transit — USB data requests
 * for the given endpoint: queued ones via cy_as_ll_remove_request; an
 * in-transit one by setting ll_abort_curr_rqt (if still being sent) or by
 * invoking its callback with CY_AS_ERROR_CANCELED and unlinking it.
 * NOTE(review): braces, else lines, and the declarations of "imask" and
 * "state" are missing from this extraction — verify upstream. */
1200 cy_as_return_status_t
1201 cy_as_ll_remove_ep_data_requests(cy_as_device *dev_p,
1202 cy_as_end_point_number_t ep)
1204 cy_as_context *ctxt_p;
1205 cy_as_ll_request_list_node *node_p;
1209 * first, remove any queued requests
1211 ctxt_p = dev_p->context[CY_RQT_USB_RQT_CONTEXT];
1213 for (node_p = ctxt_p->request_queue_p; node_p;
1214 node_p = node_p->next) {
1215 if (cy_as_ll_is_e_p_transfer_related_request
1216 (node_p->rqt, ep)) {
1217 cy_as_ll_remove_request(dev_p, ctxt_p,
1218 node_p->rqt, cy_false);
1224 * now, deal with any request that may be in transit
1226 imask = cy_as_hal_disable_interrupts();
1228 if (ctxt_p->request_queue_p != 0 &&
1229 cy_as_ll_is_e_p_transfer_related_request
1230 (ctxt_p->request_queue_p->rqt, ep) &&
1231 cy_as_request_get_node_state(ctxt_p->request_queue_p) ==
1232 CY_AS_REQUEST_LIST_STATE_WAITING) {
1233 cy_as_hal_print_message("need to remove an in-transit "
1234 "request to antioch\n");
1237 * if the request has not been fully sent to west bridge
1238 * yet, abort sending. otherwise, terminate the request
1239 * with a CANCELED status. firmware will already have
1240 * terminated this transfer.
1242 if (dev_p->ll_sending_rqt)
1243 dev_p->ll_abort_curr_rqt = cy_true;
1247 node_p = ctxt_p->request_queue_p;
1248 if (node_p->callback)
1249 node_p->callback(dev_p, ctxt_p->number,
1250 node_p->rqt, node_p->resp,
1251 CY_AS_ERROR_CANCELED);
1253 ctxt_p->request_queue_p = node_p->next;
1254 state = cy_as_hal_disable_interrupts();
1255 cy_as_hal_c_b_free(node_p);
1256 cy_as_hal_enable_interrupts(state);
1260 cy_as_hal_enable_interrupts(imask);
1263 return CY_AS_ERROR_SUCCESS;