staging: tidspbridge: remove custom TRUE FALSE
drivers/staging/tidspbridge/core/tiomap3430.c (pandora-kernel.git)
1 /*
2  * tiomap.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Processor Manager Driver for TI OMAP3430 EVM.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18
19 /*  ----------------------------------- Host OS */
20 #include <dspbridge/host_os.h>
21 #include <linux/mm.h>
22 #include <linux/mmzone.h>
23 #include <plat/control.h>
24
25 /*  ----------------------------------- DSP/BIOS Bridge */
26 #include <dspbridge/std.h>
27 #include <dspbridge/dbdefs.h>
28
29 /*  ----------------------------------- Trace & Debug */
30 #include <dspbridge/dbc.h>
31
32 /*  ----------------------------------- OS Adaptation Layer */
33 #include <dspbridge/cfg.h>
34 #include <dspbridge/drv.h>
35 #include <dspbridge/sync.h>
36
37 /* ------------------------------------ Hardware Abstraction Layer */
38 #include <hw_defs.h>
39 #include <hw_mmu.h>
40
41 /*  ----------------------------------- Link Driver */
42 #include <dspbridge/dspdefs.h>
43 #include <dspbridge/dspchnl.h>
44 #include <dspbridge/dspdeh.h>
45 #include <dspbridge/dspio.h>
46 #include <dspbridge/dspmsg.h>
47 #include <dspbridge/pwr.h>
48 #include <dspbridge/io_sm.h>
49
50 /*  ----------------------------------- Platform Manager */
51 #include <dspbridge/dev.h>
52 #include <dspbridge/dspapi.h>
53 #include <dspbridge/dmm.h>
54 #include <dspbridge/wdt.h>
55
56 /*  ----------------------------------- Local */
57 #include "_tiomap.h"
58 #include "_tiomap_pwr.h"
59 #include "tiomap_io.h"
60
61 /* Offset in shared mem to write to in order to synchronize start with DSP */
62 #define SHMSYNCOFFSET 4         /* GPP byte offset */
63
64 #define BUFFERSIZE 1024
65
66 #define TIHELEN_ACKTIMEOUT  10000
67
68 #define MMU_SECTION_ADDR_MASK    0xFFF00000
69 #define MMU_SSECTION_ADDR_MASK   0xFF000000
70 #define MMU_LARGE_PAGE_MASK      0xFFFF0000
71 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
72 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
73 #define PAGES_II_LVL_TABLE   512
74 #define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
75
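/*
 * Illustrative note: the address masks above correspond to the DSP MMU page
 * sizes -- 16 MB supersection (MMU_SSECTION_ADDR_MASK), 1 MB section
 * (MMU_SECTION_ADDR_MASK), 64 KB large page (MMU_LARGE_PAGE_MASK) and 4 KB
 * small page (MMU_SMALL_PAGE_MASK).  A hypothetical helper (not part of this
 * driver) that rounds a physical address down to its 1 MB section base would
 * simply apply the mask:
 *
 *	u32 section_base = pa & MMU_SECTION_ADDR_MASK;
 *	(e.g. pa = 0x80123456 gives section_base = 0x80100000)
 */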
76 /* Forward Declarations: */
77 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
78 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
79                                   OUT u8 *host_buff,
80                                   u32 dsp_addr, u32 ul_num_bytes,
81                                   u32 mem_type);
82 static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
83                                    u32 dsp_addr);
84 static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
85                                     int *board_state);
86 static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
87 static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
88                                    IN u8 *host_buff,
89                                    u32 dsp_addr, u32 ul_num_bytes,
90                                    u32 mem_type);
91 static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
92                                     u32 brd_state);
93 static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
94                                    u32 dsp_dest_addr, u32 dsp_src_addr,
95                                    u32 ul_num_bytes, u32 mem_type);
96 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
97                                     IN u8 *host_buff, u32 dsp_addr,
98                                     u32 ul_num_bytes, u32 mem_type);
99 static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
100                                   u32 ul_mpu_addr, u32 virt_addr,
101                                   u32 ul_num_bytes, u32 ul_map_attr,
102                                   struct page **mapped_pages);
103 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
104                                      u32 virt_addr, u32 ul_num_bytes);
105 static int bridge_dev_create(OUT struct bridge_dev_context
106                                         **dev_cntxt,
107                                         struct dev_object *hdev_obj,
108                                         IN struct cfg_hostres *config_param);
109 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
110                                   u32 dw_cmd, IN OUT void *pargs);
111 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
112 static u32 user_va2_pa(struct mm_struct *mm, u32 address);
113 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
114                              u32 va, u32 size,
115                              struct hw_mmu_map_attrs_t *map_attrs);
116 static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
117                           u32 size, struct hw_mmu_map_attrs_t *attrs);
118 static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
119                                   u32 ul_mpu_addr, u32 virt_addr,
120                                   u32 ul_num_bytes,
121                                   struct hw_mmu_map_attrs_t *hw_attrs);
122
123 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
124
125 /*  ----------------------------------- Globals */
126
127 /* Attributes of L2 page tables for DSP MMU */
128 struct page_info {
129         u32 num_entries;        /* Number of valid PTEs in the L2 PT */
130 };
131
132 /* Attributes used to manage the DSP MMU page tables */
133 struct pg_table_attrs {
134         spinlock_t pg_lock;     /* Critical section object handle */
135
136         u32 l1_base_pa;         /* Physical address of the L1 PT */
137         u32 l1_base_va;         /* Virtual  address of the L1 PT */
138         u32 l1_size;            /* Size of the L1 PT */
139         u32 l1_tbl_alloc_pa;
140         /* Physical address of Allocated mem for L1 table. May not be aligned */
141         u32 l1_tbl_alloc_va;
142         /* Virtual address of Allocated mem for L1 table. May not be aligned */
143         u32 l1_tbl_alloc_sz;
144         /* Size of consistent memory allocated for L1 table.
145          * May not be aligned */
146
147         u32 l2_base_pa;         /* Physical address of the L2 PT */
148         u32 l2_base_va;         /* Virtual  address of the L2 PT */
149         u32 l2_size;            /* Size of the L2 PT */
150         u32 l2_tbl_alloc_pa;
151         /* Physical address of Allocated mem for L2 table. May not be aligned */
152         u32 l2_tbl_alloc_va;
153         /* Virtual address of Allocated mem for L2 table. May not be aligned */
154         u32 l2_tbl_alloc_sz;
155         /* Size of consistent memory allocated for L2 table.
156          * May not be aligned */
157
158         u32 l2_num_pages;       /* Number of allocated L2 PT */
159         /* Array [l2_num_pages] of L2 PT info structs */
160         struct page_info *pg_info;
161 };
162
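/*
 * Illustrative sketch, mirroring the arithmetic used later in
 * bridge_dev_create(): the *_tbl_alloc_* fields record the raw, possibly
 * unaligned allocation, while the *_base_* fields hold the address rounded up
 * to the required alignment.  For an L1 table that must sit on an l1_size
 * boundary, with 'pt' a hypothetical struct pg_table_attrs pointer:
 *
 *	pt->l1_base_pa = (pt->l1_tbl_alloc_pa + (pt->l1_size - 1)) &
 *				~(pt->l1_size - 1);
 *	pt->l1_base_va = pt->l1_tbl_alloc_va +
 *				(pt->l1_base_pa - pt->l1_tbl_alloc_pa);
 */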
163 /*
164  *  This Bridge driver's function interface table.
165  */
166 static struct bridge_drv_interface drv_interface_fxns = {
167         /* Bridge API ver. for which this bridge driver is built. */
168         BRD_API_MAJOR_VERSION,
169         BRD_API_MINOR_VERSION,
170         bridge_dev_create,
171         bridge_dev_destroy,
172         bridge_dev_ctrl,
173         bridge_brd_monitor,
174         bridge_brd_start,
175         bridge_brd_stop,
176         bridge_brd_status,
177         bridge_brd_read,
178         bridge_brd_write,
179         bridge_brd_set_state,
180         bridge_brd_mem_copy,
181         bridge_brd_mem_write,
182         bridge_brd_mem_map,
183         bridge_brd_mem_un_map,
184         /* The following CHNL functions are provided by chnl_io.lib: */
185         bridge_chnl_create,
186         bridge_chnl_destroy,
187         bridge_chnl_open,
188         bridge_chnl_close,
189         bridge_chnl_add_io_req,
190         bridge_chnl_get_ioc,
191         bridge_chnl_cancel_io,
192         bridge_chnl_flush_io,
193         bridge_chnl_get_info,
194         bridge_chnl_get_mgr_info,
195         bridge_chnl_idle,
196         bridge_chnl_register_notify,
197         /* The following IO functions are provided by chnl_io.lib: */
198         bridge_io_create,
199         bridge_io_destroy,
200         bridge_io_on_loaded,
201         bridge_io_get_proc_load,
202         /* The following msg_ctrl functions are provided by chnl_io.lib: */
203         bridge_msg_create,
204         bridge_msg_create_queue,
205         bridge_msg_delete,
206         bridge_msg_delete_queue,
207         bridge_msg_get,
208         bridge_msg_put,
209         bridge_msg_register_notify,
210         bridge_msg_set_queue_id,
211 };
212
213 static inline void flush_all(struct bridge_dev_context *dev_context)
214 {
215         if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
216             dev_context->dw_brd_state == BRD_HIBERNATION)
217                 wake_dsp(dev_context, NULL);
218
219         hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
220 }
221
222 static void bad_page_dump(u32 pa, struct page *pg)
223 {
224         pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
225         pr_emerg("Bad page state in process '%s'\n"
226                  "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
227                  "Backtrace:\n",
228                  current->comm, pg, (int)(2 * sizeof(unsigned long)),
229                  (unsigned long)pg->flags, pg->mapping,
230                  page_mapcount(pg), page_count(pg));
231         dump_stack();
232 }
233
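/*
 * Illustrative usage (a sketch, not necessarily the exact call site): mapping
 * code converts a physical address to its struct page with the PHYS_TO_PAGE()
 * macro defined above and reports a bad page when the reference count has
 * already dropped to zero:
 *
 *	pg = PHYS_TO_PAGE(pa);
 *	if (page_count(pg) < 1)
 *		bad_page_dump(pa, pg);
 */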
234 /*
235  *  ======== bridge_drv_entry ========
236  *  purpose:
237  *      Bridge Driver entry point.
238  */
239 void bridge_drv_entry(OUT struct bridge_drv_interface **drv_intf,
240                    IN CONST char *driver_file_name)
241 {
242
243         DBC_REQUIRE(driver_file_name != NULL);
244
245         io_sm_init();           /* Initialization of io_sm module */
246
247         if (strcmp(driver_file_name, "UMA") == 0)
248                 *drv_intf = &drv_interface_fxns;
249         else
250                 dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
251
252 }
253
254 /*
255  *  ======== bridge_brd_monitor ========
256  *  purpose:
257  *      Puts the DSP into a loadable state, i.e. the application can
258  *      load and start the device.
259  *
260  *  Preconditions:
261  *      Device in 'OFF' state.
262  */
263 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
264 {
265         int status = 0;
266         struct bridge_dev_context *dev_context = dev_ctxt;
267         u32 temp;
268         struct dspbridge_platform_data *pdata =
269                                     omap_dspbridge_dev->dev.platform_data;
270
271         temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
272                                         OMAP_POWERSTATEST_MASK;
273         if (!(temp & 0x02)) {
274                 /* IVA2 is not in ON state */
275                 /* Read and set PM_PWSTCTRL_IVA2  to ON */
276                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
277                         PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
278                 /* Set the SW supervised state transition */
279                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
280                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
281
282                 /* Wait until the state has moved to ON */
283                 while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
284                                                 OMAP_INTRANSITION_MASK)
285                         ;
286                 /* Disable Automatic transition */
287                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
288                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
289         }
290         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
291                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
292         dsp_clk_enable(DSP_CLK_IVA2);
293
294         if (DSP_SUCCEEDED(status)) {
295                 /* set the device state to IDLE */
296                 dev_context->dw_brd_state = BRD_IDLE;
297         }
298         return status;
299 }
300
301 /*
302  *  ======== bridge_brd_read ========
303  *  purpose:
304  *      Reads buffers from DSP memory.
305  */
306 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
307                                   OUT u8 *host_buff, u32 dsp_addr,
308                                   u32 ul_num_bytes, u32 mem_type)
309 {
310         int status = 0;
311         struct bridge_dev_context *dev_context = dev_ctxt;
312         u32 offset;
313         u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
314
315         if (dsp_addr < dev_context->dw_dsp_start_add) {
316                 status = -EPERM;
317                 return status;
318         }
319         /* change here to account for the 3 bands of the DSP internal memory */
320         if ((dsp_addr - dev_context->dw_dsp_start_add) <
321             dev_context->dw_internal_size) {
322                 offset = dsp_addr - dev_context->dw_dsp_start_add;
323         } else {
324                 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
325                                            ul_num_bytes, mem_type);
326                 return status;
327         }
328         /* Copy the data from DSP memory */
329         memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
330         return status;
331 }
332
333 /*
334  *  ======== bridge_brd_set_state ========
335  *  purpose:
336  *      This routine updates the Board status.
337  */
338 static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
339                                     u32 brd_state)
340 {
341         int status = 0;
342         struct bridge_dev_context *dev_context = dev_ctxt;
343
344         dev_context->dw_brd_state = brd_state;
345         return status;
346 }
347
348 /*
349  *  ======== bridge_brd_start ========
350  *  purpose:
351  *      Initializes DSP MMU and Starts DSP.
352  *
353  *  Preconditions:
354  *  a) DSP domain is 'ACTIVE'.
355  *  b) DSP_RST1 is asserted.
356  *  c) DSP_RST2 is released.
357  */
358 static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
359                                    u32 dsp_addr)
360 {
361         int status = 0;
362         struct bridge_dev_context *dev_context = dev_ctxt;
363         u32 dw_sync_addr = 0;
364         u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
365         u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
366         u32 ul_tlb_base_virt;   /* Base of MMU TLB entry */
367         /* Offset of shm_base_virt from tlb_base_virt */
368         u32 ul_shm_offset_virt;
369         s32 entry_ndx;
370         s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
371         struct cfg_hostres *resources = NULL;
372         u32 temp;
373         u32 ul_dsp_clk_rate;
374         u32 ul_dsp_clk_addr;
375         u32 ul_bios_gp_timer;
376         u32 clk_cmd;
377         struct io_mgr *hio_mgr;
378         u32 ul_load_monitor_timer;
379         struct dspbridge_platform_data *pdata =
380                                 omap_dspbridge_dev->dev.platform_data;
381
382         /* The device context contains all the mmu setup info from when the
383          * last dsp base image was loaded. The first entry is always
384          * SHMMEM base. */
385         /* Get SHM_BEG - convert to byte address */
386         (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
387                              &ul_shm_base_virt);
388         ul_shm_base_virt *= DSPWORDSIZE;
389         DBC_ASSERT(ul_shm_base_virt != 0);
390         /* DSP Virtual address */
391         ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
392         DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
393         ul_shm_offset_virt =
394             ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
395         /* Kernel logical address */
396         ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
397
398         DBC_ASSERT(ul_shm_base != 0);
399         /* 2nd word is used as the sync field */
400         dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
401         /* Write a signature into the shm base + offset; this will
402          * get cleared when the DSP program starts. */
403         if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
404                 pr_err("%s: Illegal SM base\n", __func__);
405                 status = -EPERM;
406         } else
407                 *((volatile u32 *)dw_sync_addr) = 0xffffffff;
408
409         if (DSP_SUCCEEDED(status)) {
410                 resources = dev_context->resources;
411                 if (!resources)
412                         status = -EPERM;
413
414                 /* Assert RST1, i.e. reset only the DSP megacell */
415                 if (DSP_SUCCEEDED(status)) {
416                         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
417                                         OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
418                                         OMAP2_RM_RSTCTRL);
419                         /* Mask the boot address to 1 KB alignment for compatibility */
420                         __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
421                                         OMAP343X_CTRL_REGADDR(
422                                         OMAP343X_CONTROL_IVA2_BOOTADDR));
423                         /*
424                          * Set bootmode to self loop if dsp_debug flag is true
425                          */
426                         __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
427                                         OMAP343X_CTRL_REGADDR(
428                                         OMAP343X_CONTROL_IVA2_BOOTMOD));
429                 }
430         }
431         if (DSP_SUCCEEDED(status)) {
432                 /* Reset and Unreset the RST2, so that BOOTADDR is copied to
433                  * IVA2 SYSC register */
434                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
435                         OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
436                 udelay(100);
437                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
438                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
439                 udelay(100);
440
441                 /* Disable the DSP MMU */
442                 hw_mmu_disable(resources->dw_dmmu_base);
443                 /* Disable TWL */
444                 hw_mmu_twl_disable(resources->dw_dmmu_base);
445
446                 /* Only make TLB entry if both addresses are non-zero */
447                 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
448                      entry_ndx++) {
449                         struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
450                         struct hw_mmu_map_attrs_t map_attrs = {
451                                 .endianism = e->endianism,
452                                 .element_size = e->elem_size,
453                                 .mixed_size = e->mixed_mode,
454                         };
455
456                         if (!e->ul_gpp_pa || !e->ul_dsp_va)
457                                 continue;
458
459                         dev_dbg(bridge,
460                                         "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
461                                         itmp_entry_ndx,
462                                         e->ul_gpp_pa,
463                                         e->ul_dsp_va,
464                                         e->ul_size);
465
466                         hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
467                                         e->ul_gpp_pa,
468                                         e->ul_dsp_va,
469                                         e->ul_size,
470                                         itmp_entry_ndx,
471                                         &map_attrs, 1, 1);
472
473                         itmp_entry_ndx++;
474                 }
475         }
476
477         /* Lock the above TLB entries and get the BIOS and load monitor timer
478          * information */
479         if (DSP_SUCCEEDED(status)) {
480                 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
481                 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
482                 hw_mmu_ttb_set(resources->dw_dmmu_base,
483                                dev_context->pt_attrs->l1_base_pa);
484                 hw_mmu_twl_enable(resources->dw_dmmu_base);
485                 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
486
487                 temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
488                 temp = (temp & 0xFFFFFFEF) | 0x11;
489                 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
490
491                 /* Let the DSP MMU run */
492                 hw_mmu_enable(resources->dw_dmmu_base);
493
494                 /* Enable the BIOS clock */
495                 (void)dev_get_symbol(dev_context->hdev_obj,
496                                      BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
497                 (void)dev_get_symbol(dev_context->hdev_obj,
498                                      BRIDGEINIT_LOADMON_GPTIMER,
499                                      &ul_load_monitor_timer);
500         }
501
502         if (DSP_SUCCEEDED(status)) {
503                 if (ul_load_monitor_timer != 0xFFFF) {
504                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
505                             ul_load_monitor_timer;
506                         dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
507                 } else {
508                         dev_dbg(bridge, "Not able to get the symbol for Load "
509                                 "Monitor Timer\n");
510                 }
511         }
512
513         if (DSP_SUCCEEDED(status)) {
514                 if (ul_bios_gp_timer != 0xFFFF) {
515                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
516                             ul_bios_gp_timer;
517                         dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
518                 } else {
519                         dev_dbg(bridge,
520                                 "Not able to get the symbol for BIOS Timer\n");
521                 }
522         }
523
524         if (DSP_SUCCEEDED(status)) {
525                 /* Set the DSP clock rate */
526                 (void)dev_get_symbol(dev_context->hdev_obj,
527                                      "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
528                 /* Set Autoidle mode for the IVA2 PLL */
529                 (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
530                                 OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
531
532                 if ((unsigned int *)ul_dsp_clk_addr != NULL) {
533                         /* Get the clock rate */
534                         ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
535                         dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
536                                 __func__, ul_dsp_clk_rate);
537                         (void)bridge_brd_write(dev_context,
538                                                (u8 *) &ul_dsp_clk_rate,
539                                                ul_dsp_clk_addr, sizeof(u32), 0);
540                 }
541                 /*
542                  * Enable Mailbox events and also drain any pending
543                  * stale messages.
544                  */
545                 dev_context->mbox = omap_mbox_get("dsp");
546                 if (IS_ERR(dev_context->mbox)) {
547                         dev_context->mbox = NULL;
548                         pr_err("%s: Failed to get dsp mailbox handle\n",
549                                                                 __func__);
550                         status = -EPERM;
551                 }
552
553         }
554         if (DSP_SUCCEEDED(status)) {
555                 dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
556
557 /*PM_IVA2GRPSEL_PER = 0xC0;*/
558                 temp = (u32) *((reg_uword32 *)
559                                 ((u32) (resources->dw_per_pm_base) + 0xA8));
560                 temp = (temp & 0xFFFFFF30) | 0xC0;
561                 *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA8)) =
562                     (u32) temp;
563
564 /*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
565                 temp = (u32) *((reg_uword32 *)
566                                 ((u32) (resources->dw_per_pm_base) + 0xA4));
567                 temp = (temp & 0xFFFFFF3F);
568                 *((reg_uword32 *) ((u32) (resources->dw_per_pm_base) + 0xA4)) =
569                     (u32) temp;
570 /*CM_SLEEPDEP_PER |= 0x04; */
571                 temp = (u32) *((reg_uword32 *)
572                                 ((u32) (resources->dw_per_base) + 0x44));
573                 temp = (temp & 0xFFFFFFFB) | 0x04;
574                 *((reg_uword32 *) ((u32) (resources->dw_per_base) + 0x44)) =
575                     (u32) temp;
576
577 /* CM_CLKSTCTRL_IVA2 = 0x00000003 - allow automatic transitions */
578                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
579                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
580
581                 /* Let DSP go */
582                 dev_dbg(bridge, "%s Unreset\n", __func__);
583                 /* Enable DSP MMU Interrupts */
584                 hw_mmu_event_enable(resources->dw_dmmu_base,
585                                     HW_MMU_ALL_INTERRUPTS);
586                 /* release the RST1, DSP starts executing now .. */
587                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
588                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
589
590                 dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
591                 dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
592                 if (dsp_debug)
593                         while (*((volatile u16 *)dw_sync_addr))
594                                 ;
595
596                 /* Wait for DSP to clear word in shared memory */
597                 /* Read the Location */
598                 if (!wait_for_start(dev_context, dw_sync_addr))
599                         status = -ETIMEDOUT;
600
601                 /* Start wdt */
602                 dsp_wdt_sm_set((void *)ul_shm_base);
603                 dsp_wdt_enable(true);
604
605                 status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
606                 if (hio_mgr) {
607                         io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
608                         /* Write the synchronization bit to indicate the
609                          * completion of OPP table update to DSP
610                          */
611                         *((volatile u32 *)dw_sync_addr) = 0xCAFECAFE;
612
613                         /* update board state */
614                         dev_context->dw_brd_state = BRD_RUNNING;
615                         /* (void)chnlsm_enable_interrupt(dev_context); */
616                 } else {
617                         dev_context->dw_brd_state = BRD_UNKNOWN;
618                 }
619         }
620         return status;
621 }
622
623 /*
624  *  ======== bridge_brd_stop ========
625  *  purpose:
626  *      Puts DSP in self loop.
627  *
628  *  Preconditions :
629  *  a) None
630  */
631 static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
632 {
633         int status = 0;
634         struct bridge_dev_context *dev_context = dev_ctxt;
635         struct pg_table_attrs *pt_attrs;
636         u32 dsp_pwr_state;
637         int clk_status;
638         struct dspbridge_platform_data *pdata =
639                                 omap_dspbridge_dev->dev.platform_data;
640
641         if (dev_context->dw_brd_state == BRD_STOPPED)
642                 return status;
643
644         /* As per the TRM, it is advised to first drive the IVA2 to 'Standby' mode
645          * before turning off the clocks. This ensures that there are no
646          * pending L3 or other transactions from the IVA2 */
647         dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
648                                         OMAP_POWERSTATEST_MASK;
649         if (dsp_pwr_state != PWRDM_POWER_OFF) {
650                 sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
651                 mdelay(10);
652
653                 clk_status = dsp_clk_disable(DSP_CLK_IVA2);
654
655                 /* IVA2 is not in OFF state */
656                 /* Set PM_PWSTCTRL_IVA2  to OFF */
657                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
658                         PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
659                 /* Set the SW supervised state transition for Sleep */
660                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
661                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
662         } else {
663                 clk_status = dsp_clk_disable(DSP_CLK_IVA2);
664         }
665         udelay(10);
666         /* Release the Ext Base virtual Address as the next DSP Program
667          * may have a different load address */
668         if (dev_context->dw_dsp_ext_base_addr)
669                 dev_context->dw_dsp_ext_base_addr = 0;
670
671         dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
672
673         dsp_wdt_enable(false);
674
675         /* This is a good place to clear the MMU page tables as well */
676         if (dev_context->pt_attrs) {
677                 pt_attrs = dev_context->pt_attrs;
678                 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
679                 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
680                 memset((u8 *) pt_attrs->pg_info, 0x00,
681                        (pt_attrs->l2_num_pages * sizeof(struct page_info)));
682         }
683         /* Disable the mailbox interrupts */
684         if (dev_context->mbox) {
685                 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
686                 omap_mbox_put(dev_context->mbox);
687                 dev_context->mbox = NULL;
688         }
689         /* Reset IVA2 clocks*/
690         (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
691                         OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
692
693         return status;
694 }
695
696 /*
697  *  ======== bridge_brd_delete ========
698  *  purpose:
699  *      Puts DSP in Low power mode
700  *
701  *  Preconditions :
702  *  a) None
703  */
704 static int bridge_brd_delete(struct bridge_dev_context *dev_ctxt)
705 {
706         int status = 0;
707         struct bridge_dev_context *dev_context = dev_ctxt;
708         struct pg_table_attrs *pt_attrs;
709         int clk_status;
710         struct dspbridge_platform_data *pdata =
711                                 omap_dspbridge_dev->dev.platform_data;
712
713         if (dev_context->dw_brd_state == BRD_STOPPED)
714                 return status;
715
716         /* As per the TRM, it is advised to first drive
717          * the IVA2 to 'Standby' mode before turning off the clocks. This is
718          * to ensure that there are no pending L3 or other transactions from
719          * the IVA2 */
720         status = sleep_dsp(dev_context, PWR_EMERGENCYDEEPSLEEP, NULL);
721         clk_status = dsp_clk_disable(DSP_CLK_IVA2);
722
723         /* Release the Ext Base virtual Address as the next DSP Program
724          * may have a different load address */
725         if (dev_context->dw_dsp_ext_base_addr)
726                 dev_context->dw_dsp_ext_base_addr = 0;
727
728         dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
729
730         /* This is a good place to clear the MMU page tables as well */
731         if (dev_context->pt_attrs) {
732                 pt_attrs = dev_context->pt_attrs;
733                 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
734                 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
735                 memset((u8 *) pt_attrs->pg_info, 0x00,
736                        (pt_attrs->l2_num_pages * sizeof(struct page_info)));
737         }
738         /* Disable the mailbox interrupts */
739         if (dev_context->mbox) {
740                 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
741                 omap_mbox_put(dev_context->mbox);
742                 dev_context->mbox = NULL;
743         }
744         /* Reset IVA2 clocks*/
745         (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
746                         OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
747
748         return status;
749 }
750
751 /*
752  *  ======== bridge_brd_status ========
753  *      Returns the board status.
754  */
755 static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
756                                     int *board_state)
757 {
758         struct bridge_dev_context *dev_context = dev_ctxt;
759         *board_state = dev_context->dw_brd_state;
760         return 0;
761 }
762
763 /*
764  *  ======== bridge_brd_write ========
765  *      Copies the buffers to DSP internal or external memory.
766  */
767 static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
768                                    IN u8 *host_buff, u32 dsp_addr,
769                                    u32 ul_num_bytes, u32 mem_type)
770 {
771         int status = 0;
772         struct bridge_dev_context *dev_context = dev_ctxt;
773
774         if (dsp_addr < dev_context->dw_dsp_start_add) {
775                 status = -EPERM;
776                 return status;
777         }
778         if ((dsp_addr - dev_context->dw_dsp_start_add) <
779             dev_context->dw_internal_size) {
780                 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
781                                         ul_num_bytes, mem_type);
782         } else {
783                 status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
784                                             ul_num_bytes, mem_type, false);
785         }
786
787         return status;
788 }
789
790 /*
791  *  ======== bridge_dev_create ========
792  *      Creates a driver object. Puts DSP in self loop.
793  */
794 static int bridge_dev_create(OUT struct bridge_dev_context
795                                         **dev_cntxt,
796                                         struct dev_object *hdev_obj,
797                                         IN struct cfg_hostres *config_param)
798 {
799         int status = 0;
800         struct bridge_dev_context *dev_context = NULL;
801         s32 entry_ndx;
802         struct cfg_hostres *resources = config_param;
803         struct pg_table_attrs *pt_attrs;
804         u32 pg_tbl_pa;
805         u32 pg_tbl_va;
806         u32 align_size;
807         struct drv_data *drv_datap = dev_get_drvdata(bridge);
808
809         /* Allocate and initialize a data structure to contain the bridge driver
810          *  state, which becomes the context for later calls into this driver */
811         dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
812         if (!dev_context) {
813                 status = -ENOMEM;
814                 goto func_end;
815         }
816
817         dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
818         dev_context->dw_self_loop = (u32) NULL;
819         dev_context->dsp_per_clks = 0;
820         dev_context->dw_internal_size = OMAP_DSP_SIZE;
821         /*  Clear dev context MMU table entries.
822          *  These get set on bridge_io_on_loaded() call after program loaded. */
823         for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
824                 dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
825                     dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
826         }
827         dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
828                                                                  (config_param->
829                                                                   dw_mem_base
830                                                                   [3]),
831                                                                  config_param->
832                                                                  dw_mem_length
833                                                                  [3]);
834         if (!dev_context->dw_dsp_base_addr)
835                 status = -EPERM;
836
837         pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
838         if (pt_attrs != NULL) {
839                 /* Assuming that we use only the DSP's memory map
840                  * up to 0x4000:0000, we would need only 1024
841                  * L1 entries, i.e. L1 size = 4K */
842                 pt_attrs->l1_size = 0x1000;
843                 align_size = pt_attrs->l1_size;
844                 /* Align sizes are expected to be a power of 2 */
845                 /* We want the allocation aligned to the L1 table size */
846                 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
847                                                      align_size, &pg_tbl_pa);
848
849                 /* Check if the PA is aligned for us */
850                 if ((pg_tbl_pa) & (align_size - 1)) {
851                         /* PA not aligned to the page table size;
852                          * allocate more and align manually */
853                         mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
854                                           pt_attrs->l1_size);
855                         /* We want the allocation aligned to the L1 table size */
856                         pg_tbl_va =
857                             (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
858                                                      align_size, &pg_tbl_pa);
859                         /* We should be able to get aligned table now */
860                         pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
861                         pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
862                         pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
863                         /* Align the PA to the next 'align' boundary */
864                         pt_attrs->l1_base_pa =
865                             ((pg_tbl_pa) +
866                              (align_size - 1)) & (~(align_size - 1));
867                         pt_attrs->l1_base_va =
868                             pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
869                 } else {
870                         /* We got aligned PA, cool */
871                         pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
872                         pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
873                         pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
874                         pt_attrs->l1_base_pa = pg_tbl_pa;
875                         pt_attrs->l1_base_va = pg_tbl_va;
876                 }
877                 if (pt_attrs->l1_base_va)
878                         memset((u8 *) pt_attrs->l1_base_va, 0x00,
879                                pt_attrs->l1_size);
880
881                 /* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
882                  * L4 pages */
883                 pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
884                 pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
885                     pt_attrs->l2_num_pages;
886                 align_size = 4; /* Make it u32 aligned */
887                 /* The L2 table allocation only needs to be u32 aligned */
888                 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
889                                                      align_size, &pg_tbl_pa);
890                 pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
891                 pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
892                 pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
893                 pt_attrs->l2_base_pa = pg_tbl_pa;
894                 pt_attrs->l2_base_va = pg_tbl_va;
895
896                 if (pt_attrs->l2_base_va)
897                         memset((u8 *) pt_attrs->l2_base_va, 0x00,
898                                pt_attrs->l2_size);
899
900                 pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
901                                         sizeof(struct page_info), GFP_KERNEL);
902                 dev_dbg(bridge,
903                         "L1 pa %x, va %x, size %x\n L2 pa %x, va "
904                         "%x, size %x\n", pt_attrs->l1_base_pa,
905                         pt_attrs->l1_base_va, pt_attrs->l1_size,
906                         pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
907                         pt_attrs->l2_size);
908                 dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
909                         pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
910         }
911         if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
912             (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
913                 dev_context->pt_attrs = pt_attrs;
914         else
915                 status = -ENOMEM;
916
917         if (DSP_SUCCEEDED(status)) {
918                 spin_lock_init(&pt_attrs->pg_lock);
919                 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
920
921                 /* Set the Clock Divisor for the DSP module */
922                 udelay(5);
923                 /* MMU address is obtained from the host
924                  * resources struct */
925                 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
926         }
927         if (DSP_SUCCEEDED(status)) {
928                 dev_context->hdev_obj = hdev_obj;
929                 /* Store current board state. */
930                 dev_context->dw_brd_state = BRD_STOPPED;
931                 dev_context->resources = resources;
932                 /* Return ptr to our device state to the DSP API for storage */
933                 *dev_cntxt = dev_context;
934         } else {
935                 if (pt_attrs != NULL) {
936                         kfree(pt_attrs->pg_info);
937
938                         if (pt_attrs->l2_tbl_alloc_va) {
939                                 mem_free_phys_mem((void *)
940                                                   pt_attrs->l2_tbl_alloc_va,
941                                                   pt_attrs->l2_tbl_alloc_pa,
942                                                   pt_attrs->l2_tbl_alloc_sz);
943                         }
944                         if (pt_attrs->l1_tbl_alloc_va) {
945                                 mem_free_phys_mem((void *)
946                                                   pt_attrs->l1_tbl_alloc_va,
947                                                   pt_attrs->l1_tbl_alloc_pa,
948                                                   pt_attrs->l1_tbl_alloc_sz);
949                         }
950                 }
951                 kfree(pt_attrs);
952                 kfree(dev_context);
953         }
954 func_end:
955         return status;
956 }
957
958 /*
959  *  ======== bridge_dev_ctrl ========
960  *      Receives device specific commands.
961  */
962 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
963                                   u32 dw_cmd, IN OUT void *pargs)
964 {
965         int status = 0;
966         struct bridge_ioctl_extproc *pa_ext_proc =
967                                         (struct bridge_ioctl_extproc *)pargs;
968         s32 ndx;
969
970         switch (dw_cmd) {
971         case BRDIOCTL_CHNLREAD:
972                 break;
973         case BRDIOCTL_CHNLWRITE:
974                 break;
975         case BRDIOCTL_SETMMUCONFIG:
976                 /* store away dsp-mmu setup values for later use */
977                 for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
978                         dev_context->atlb_entry[ndx] = *pa_ext_proc;
979                 break;
980         case BRDIOCTL_DEEPSLEEP:
981         case BRDIOCTL_EMERGENCYSLEEP:
982                 /* Currently only DSP Idle is supported. Needs updating for
983                  * later releases */
984                 status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
985                 break;
986         case BRDIOCTL_WAKEUP:
987                 status = wake_dsp(dev_context, pargs);
988                 break;
989         case BRDIOCTL_CLK_CTRL:
990                 status = 0;
991                 /* Looking For Baseport Fix for Clocks */
992                 status = dsp_peripheral_clk_ctrl(dev_context, pargs);
993                 break;
994         case BRDIOCTL_PWR_HIBERNATE:
995                 status = handle_hibernation_from_dsp(dev_context);
996                 break;
997         case BRDIOCTL_PRESCALE_NOTIFY:
998                 status = pre_scale_dsp(dev_context, pargs);
999                 break;
1000         case BRDIOCTL_POSTSCALE_NOTIFY:
1001                 status = post_scale_dsp(dev_context, pargs);
1002                 break;
1003         case BRDIOCTL_CONSTRAINT_REQUEST:
1004                 status = handle_constraints_set(dev_context, pargs);
1005                 break;
1006         default:
1007                 status = -EPERM;
1008                 break;
1009         }
1010         return status;
1011 }
1012
1013 /*
1014  *  ======== bridge_dev_destroy ========
1015  *      Destroys the driver object.
1016  */
1017 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1018 {
1019         struct pg_table_attrs *pt_attrs;
1020         int status = 0;
1021         struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
1022             dev_ctxt;
1023         struct cfg_hostres *host_res;
1024         u32 shm_size;
1025         struct drv_data *drv_datap = dev_get_drvdata(bridge);
1026
1027         /* It should never happen */
1028         if (!dev_ctxt)
1029                 return -EFAULT;
1030
1031         /* first put the device to stop state */
1032         bridge_brd_delete(dev_context);
1033         if (dev_context->pt_attrs) {
1034                 pt_attrs = dev_context->pt_attrs;
1035                 kfree(pt_attrs->pg_info);
1036
1037                 if (pt_attrs->l2_tbl_alloc_va) {
1038                         mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
1039                                           pt_attrs->l2_tbl_alloc_pa,
1040                                           pt_attrs->l2_tbl_alloc_sz);
1041                 }
1042                 if (pt_attrs->l1_tbl_alloc_va) {
1043                         mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
1044                                           pt_attrs->l1_tbl_alloc_pa,
1045                                           pt_attrs->l1_tbl_alloc_sz);
1046                 }
1047                 kfree(pt_attrs);
1048
1049         }
1050
1051         if (dev_context->resources) {
1052                 host_res = dev_context->resources;
1053                 shm_size = drv_datap->shm_size;
1054                 if (shm_size >= 0x10000) {
1055                         if ((host_res->dw_mem_base[1]) &&
1056                             (host_res->dw_mem_phys[1])) {
1057                                 mem_free_phys_mem((void *)
1058                                                   host_res->dw_mem_base
1059                                                   [1],
1060                                                   host_res->dw_mem_phys
1061                                                   [1], shm_size);
1062                         }
1063                 } else {
1064                         dev_dbg(bridge, "%s: Error getting shm size "
1065                                 "from registry: %x. Not calling "
1066                                 "mem_free_phys_mem\n", __func__,
1067                                 status);
1068                 }
1069                 host_res->dw_mem_base[1] = 0;
1070                 host_res->dw_mem_phys[1] = 0;
1071
1072                 if (host_res->dw_mem_base[0])
1073                         iounmap((void *)host_res->dw_mem_base[0]);
1074                 if (host_res->dw_mem_base[2])
1075                         iounmap((void *)host_res->dw_mem_base[2]);
1076                 if (host_res->dw_mem_base[3])
1077                         iounmap((void *)host_res->dw_mem_base[3]);
1078                 if (host_res->dw_mem_base[4])
1079                         iounmap((void *)host_res->dw_mem_base[4]);
1080                 if (host_res->dw_dmmu_base)
1081                         iounmap(host_res->dw_dmmu_base);
1082                 if (host_res->dw_per_base)
1083                         iounmap(host_res->dw_per_base);
1084                 if (host_res->dw_per_pm_base)
1085                         iounmap((void *)host_res->dw_per_pm_base);
1086                 if (host_res->dw_core_pm_base)
1087                         iounmap((void *)host_res->dw_core_pm_base);
1088                 if (host_res->dw_sys_ctrl_base)
1089                         iounmap(host_res->dw_sys_ctrl_base);
1090
1091                 host_res->dw_mem_base[0] = (u32) NULL;
1092                 host_res->dw_mem_base[2] = (u32) NULL;
1093                 host_res->dw_mem_base[3] = (u32) NULL;
1094                 host_res->dw_mem_base[4] = (u32) NULL;
1095                 host_res->dw_dmmu_base = NULL;
1096                 host_res->dw_sys_ctrl_base = NULL;
1097
1098                 kfree(host_res);
1099         }
1100
1101         /* Free the driver's device context: */
1102         kfree(drv_datap->base_img);
1103         kfree(drv_datap);
1104         dev_set_drvdata(bridge, NULL);
1105         kfree((void *)dev_ctxt);
1106         return status;
1107 }
1108
1109 static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1110                                    u32 dsp_dest_addr, u32 dsp_src_addr,
1111                                    u32 ul_num_bytes, u32 mem_type)
1112 {
1113         int status = 0;
1114         u32 src_addr = dsp_src_addr;
1115         u32 dest_addr = dsp_dest_addr;
1116         u32 copy_bytes = 0;
1117         u32 total_bytes = ul_num_bytes;
1118         u8 host_buf[BUFFERSIZE];
1119         struct bridge_dev_context *dev_context = dev_ctxt;
1120         while ((total_bytes > 0) && DSP_SUCCEEDED(status)) {
1121                 copy_bytes =
1122                     total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
1123                 /* Read from External memory */
1124                 status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
1125                                            copy_bytes, mem_type);
1126                 if (DSP_SUCCEEDED(status)) {
1127                         if (dest_addr < (dev_context->dw_dsp_start_add +
1128                                          dev_context->dw_internal_size)) {
1129                                 /* Write to Internal memory */
1130                                 status = write_dsp_data(dev_ctxt, host_buf,
1131                                                         dest_addr, copy_bytes,
1132                                                         mem_type);
1133                         } else {
1134                                 /* Write to External memory */
1135                                 status =
1136                                     write_ext_dsp_data(dev_ctxt, host_buf,
1137                                                        dest_addr, copy_bytes,
1138                                                        mem_type, false);
1139                         }
1140                 }
1141                 total_bytes -= copy_bytes;
1142                 src_addr += copy_bytes;
1143                 dest_addr += copy_bytes;
1144         }
1145         return status;
1146 }
1147
1148 /* Memory write that, unlike bridge_brd_write, does not halt the DSP */
1149 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1150                                     IN u8 *host_buff, u32 dsp_addr,
1151                                     u32 ul_num_bytes, u32 mem_type)
1152 {
1153         int status = 0;
1154         struct bridge_dev_context *dev_context = dev_ctxt;
1155         u32 ul_remain_bytes = 0;
1156         u32 ul_bytes = 0;
1157         ul_remain_bytes = ul_num_bytes;
1158         while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) {
1159                 ul_bytes =
1160                     ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1161                 if (dsp_addr < (dev_context->dw_dsp_start_add +
1162                                  dev_context->dw_internal_size)) {
1163                         status =
1164                             write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1165                                            ul_bytes, mem_type);
1166                 } else {
1167                         status = write_ext_dsp_data(dev_ctxt, host_buff,
1168                                                     dsp_addr, ul_bytes,
1169                                                     mem_type, true);
1170                 }
1171                 ul_remain_bytes -= ul_bytes;
1172                 dsp_addr += ul_bytes;
1173                 host_buff = host_buff + ul_bytes;
1174         }
1175         return status;
1176 }
1177
1178 /*
1179  *  ======== bridge_brd_mem_map ========
1180  *      This function maps an MPU buffer into the DSP address space. It performs
1181  *  linear to physical address translation if required. It translates each
1182  *  page since linear addresses can be physically non-contiguous.
1183  *  All address & size arguments are assumed to be page aligned (in proc.c).
1184  *
1185  *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1186  */
1187 static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1188                                   u32 ul_mpu_addr, u32 virt_addr,
1189                                   u32 ul_num_bytes, u32 ul_map_attr,
1190                                   struct page **mapped_pages)
1191 {
1192         u32 attrs;
1193         int status = 0;
1194         struct bridge_dev_context *dev_context = dev_ctxt;
1195         struct hw_mmu_map_attrs_t hw_attrs;
1196         struct vm_area_struct *vma;
1197         struct mm_struct *mm = current->mm;
1198         u32 write = 0;
1199         u32 num_usr_pgs = 0;
1200         struct page *mapped_page, *pg;
1201         s32 pg_num;
1202         u32 va = virt_addr;
1203         struct task_struct *curr_task = current;
1204         u32 pg_i = 0;
1205         u32 mpu_addr, pa;
1206
1207         dev_dbg(bridge,
1208                 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1209                 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1210                 ul_map_attr);
1211         if (ul_num_bytes == 0)
1212                 return -EINVAL;
1213
1214         if (ul_map_attr & DSP_MAP_DIR_MASK) {
1215                 attrs = ul_map_attr;
1216         } else {
1217                 /* Assign default attributes */
1218                 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1219         }
1220         /* Take mapping properties */
1221         if (attrs & DSP_MAPBIGENDIAN)
1222                 hw_attrs.endianism = HW_BIG_ENDIAN;
1223         else
1224                 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1225
1226         hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1227             ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1228         /* Ignore element_size if mixed_size is enabled */
1229         if (hw_attrs.mixed_size == 0) {
1230                 if (attrs & DSP_MAPELEMSIZE8) {
1231                         /* Size is 8 bit */
1232                         hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1233                 } else if (attrs & DSP_MAPELEMSIZE16) {
1234                         /* Size is 16 bit */
1235                         hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1236                 } else if (attrs & DSP_MAPELEMSIZE32) {
1237                         /* Size is 32 bit */
1238                         hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1239                 } else if (attrs & DSP_MAPELEMSIZE64) {
1240                         /* Size is 64 bit */
1241                         hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1242                 } else {
1243                         /*
1244                          * Mixedsize isn't enabled, so size can't be
1245                          * zero here
1246                          */
1247                         return -EINVAL;
1248                 }
1249         }
1250         if (attrs & DSP_MAPDONOTLOCK)
1251                 hw_attrs.donotlockmpupage = 1;
1252         else
1253                 hw_attrs.donotlockmpupage = 0;
1254
1255         if (attrs & DSP_MAPVMALLOCADDR) {
1256                 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1257                                        ul_num_bytes, &hw_attrs);
1258         }
1259         /*
1260          * Do OS-specific user-va to pa translation.
1261          * Combine physically contiguous regions to reduce TLBs.
1262          * Pass the translated pa to pte_update.
1263          */
1264         if ((attrs & DSP_MAPPHYSICALADDR)) {
1265                 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1266                                     ul_num_bytes, &hw_attrs);
1267                 goto func_cont;
1268         }
1269
1270         /*
1271          * Important Note: ul_mpu_addr is mapped from user application process
1272          * to current process - it must lie completely within the current
1273          * virtual memory address space in order to be of use to us here!
1274          */
1275         down_read(&mm->mmap_sem);
1276         vma = find_vma(mm, ul_mpu_addr);
1277         if (vma)
1278                 dev_dbg(bridge,
1279                         "VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1280                         "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1281                         ul_num_bytes, vma->vm_start, vma->vm_end,
1282                         vma->vm_flags);
1283
1284         /*
1285          * It is observed that under some circumstances, the user buffer is
1286          * spread across several VMAs. So loop through and check if the entire
1287          * user buffer is covered
1288          */
1289         while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1290                 /* jump to the next VMA region */
1291                 vma = find_vma(mm, vma->vm_end + 1);
1292                 if (vma)
1293                         dev_dbg(bridge,
1294                                 "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1295                                 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1296                                 ul_num_bytes, vma->vm_start, vma->vm_end, vma->vm_flags);
1297         }
1298         if (!vma) {
1299                 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1300                        __func__, ul_mpu_addr, ul_num_bytes);
1301                 status = -EINVAL;
1302                 up_read(&mm->mmap_sem);
1303                 goto func_cont;
1304         }
1305
1306         if (vma->vm_flags & VM_IO) {
1307                 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1308                 mpu_addr = ul_mpu_addr;
1309
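                /*
                 * VM_IO areas typically map device or reserved memory that
                 * may have no struct page behind it, so each 4 KB page is
                 * resolved by walking the page tables (user_va2_pa()) and a
                 * page reference is taken only where pfn_valid() confirms a
                 * struct page exists.
                 */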
1310                 /* Get the physical addresses for user buffer */
1311                 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1312                         pa = user_va2_pa(mm, mpu_addr);
1313                         if (!pa) {
1314                                 status = -EPERM;
1315                                 pr_err("DSPBRIDGE: VM_IO mapping physical "
1316                                        "address is invalid\n");
1317                                 break;
1318                         }
1319                         if (pfn_valid(__phys_to_pfn(pa))) {
1320                                 pg = PHYS_TO_PAGE(pa);
1321                                 get_page(pg);
1322                                 if (page_count(pg) < 1) {
1323                                         pr_err("Bad page in VM_IO buffer\n");
1324                                         bad_page_dump(pa, pg);
1325                                 }
1326                         }
1327                         status = pte_set(dev_context->pt_attrs, pa,
1328                                          va, HW_PAGE_SIZE4KB, &hw_attrs);
1329                         if (DSP_FAILED(status))
1330                                 break;
1331
1332                         va += HW_PAGE_SIZE4KB;
1333                         mpu_addr += HW_PAGE_SIZE4KB;
1334                         pa += HW_PAGE_SIZE4KB;
1335                 }
1336         } else {
1337                 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1338                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1339                         write = 1;
1340
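                /*
                 * Regular user buffers are pinned one 4 KB page at a time
                 * with get_user_pages(), which takes a reference on each
                 * page; bridge_brd_mem_un_map() drops that reference again
                 * via page_cache_release() when the block is unmapped.
                 */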
1341                 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1342                         pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1343                                                 write, 1, &mapped_page, NULL);
1344                         if (pg_num > 0) {
1345                                 if (page_count(mapped_page) < 1) {
1346                                         pr_err("Bad page count after doing "
1347                                                "get_user_pages on "
1348                                                "user buffer\n");
1349                                         bad_page_dump(page_to_phys(mapped_page),
1350                                                       mapped_page);
1351                                 }
1352                                 status = pte_set(dev_context->pt_attrs,
1353                                                  page_to_phys(mapped_page), va,
1354                                                  HW_PAGE_SIZE4KB, &hw_attrs);
1355                                 if (DSP_FAILED(status))
1356                                         break;
1357
1358                                 if (mapped_pages)
1359                                         mapped_pages[pg_i] = mapped_page;
1360
1361                                 va += HW_PAGE_SIZE4KB;
1362                                 ul_mpu_addr += HW_PAGE_SIZE4KB;
1363                         } else {
1364                                 pr_err("DSPBRIDGE: get_user_pages FAILED, "
1365                                        "MPU addr = 0x%x, "
1366                                        "vma->vm_flags = 0x%lx, "
1367                                        "get_user_pages error "
1368                                        "value = %d, buffer "
1369                                        "size = 0x%x\n", ul_mpu_addr,
1370                                        vma->vm_flags, pg_num, ul_num_bytes);
1371                                 status = -EPERM;
1372                                 break;
1373                         }
1374                 }
1375         }
1376         up_read(&mm->mmap_sem);
1377 func_cont:
1378         if (DSP_SUCCEEDED(status)) {
1379                 status = 0;
1380         } else {
1381                 /*
1382                  * Roll back the pages mapped so far, in case the mapping
1383                  * failed partway through
1384                  */
1385                 if (pg_i) {
1386                         bridge_brd_mem_un_map(dev_context, virt_addr,
1387                                            (pg_i * PG_SIZE4K));
1388                 }
1389                 status = -EPERM;
1390         }
1391         /*
1392          * In any case, flush the TLB
1393          * This is done here instead of in pte_update to avoid unnecessary
1394          * repetition while mapping non-contiguous physical regions of a virtual
1395          * region
1396          */
1397         flush_all(dev_context);
1398         dev_dbg(bridge, "%s status %x\n", __func__, status);
1399         return status;
1400 }
1401
1402 /*
1403  *  ======== bridge_brd_mem_un_map ========
1404  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1405  *
1406  *      PTEs of a mapped memory block are contiguous in any page table,
1407  *      so instead of looking up the PTE address for every 4K block,
1408  *      we clear consecutive PTEs until all the bytes are unmapped.
1409  */
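/*
 * Each L2 (coarse) page table is HW_MMU_COARSE_PAGE_SIZE bytes of u32
 * entries, with every entry mapping 4 KB of DSP virtual address space, so at
 * most HW_MMU_COARSE_PAGE_SIZE / sizeof(u32) PTEs are cleared per L2 table
 * before moving on to the next L1 entry.  A 64 KB large-page entry is
 * released as 16 consecutive 4 KB pages.
 */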
1410 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1411                                      u32 virt_addr, u32 ul_num_bytes)
1412 {
1413         u32 l1_base_va;
1414         u32 l2_base_va;
1415         u32 l2_base_pa;
1416         u32 l2_page_num;
1417         u32 pte_val;
1418         u32 pte_size;
1419         u32 pte_count;
1420         u32 pte_addr_l1;
1421         u32 pte_addr_l2 = 0;
1422         u32 rem_bytes;
1423         u32 rem_bytes_l2;
1424         u32 va_curr;
1425         struct page *pg = NULL;
1426         int status = 0;
1427         struct bridge_dev_context *dev_context = dev_ctxt;
1428         struct pg_table_attrs *pt = dev_context->pt_attrs;
1429         u32 temp;
1430         u32 paddr;
1431         u32 numof4k_pages = 0;
1432
1433         va_curr = virt_addr;
1434         rem_bytes = ul_num_bytes;
1435         rem_bytes_l2 = 0;
1436         l1_base_va = pt->l1_base_va;
1437         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1438         dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1439                 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1440                 ul_num_bytes, l1_base_va, pte_addr_l1);
1441
1442         while (rem_bytes && (DSP_SUCCEEDED(status))) {
1443                 u32 va_curr_orig = va_curr;
1444                 /* Find whether the L1 PTE points to a valid L2 PT */
1445                 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1446                 pte_val = *(u32 *) pte_addr_l1;
1447                 pte_size = hw_mmu_pte_size_l1(pte_val);
1448
1449                 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1450                         goto skip_coarse_page;
1451
1452                 /*
1453                  * Get the L2 PA from the L1 PTE, and find
1454                  * corresponding L2 VA
1455                  */
1456                 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1457                 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1458                 l2_page_num =
1459                     (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1460                 /*
1461                  * Find the L2 PTE address from which we will start
1462                  * clearing, the number of PTEs to be cleared on this
1463                  * page, and the size of VA space that needs to be
1464                  * cleared on this L2 page
1465                  */
1466                 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1467                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1468                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1469                 if (rem_bytes < (pte_count * PG_SIZE4K))
1470                         pte_count = rem_bytes / PG_SIZE4K;
1471                 rem_bytes_l2 = pte_count * PG_SIZE4K;
1472
1473                 /*
1474                  * Unmap the VA space on this L2 PT. A quicker way
1475                  * would be to clear pte_count entries starting from
1476                  * pte_addr_l2. However, the code below checks that we don't
1477                  * clear invalid entries, or clear less than 64KB for a 64KB
1478                  * entry. Similar checking is done for L1 PTEs, too,
1479                  * below
1480                  */
1481                 while (rem_bytes_l2 && (DSP_SUCCEEDED(status))) {
1482                         pte_val = *(u32 *) pte_addr_l2;
1483                         pte_size = hw_mmu_pte_size_l2(pte_val);
1484                         /* va_curr aligned to pte_size? */
1485                         if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1486                             va_curr & (pte_size - 1)) {
1487                                 status = -EPERM;
1488                                 break;
1489                         }
1490
1491                         /* Collect Physical addresses from VA */
1492                         paddr = (pte_val & ~(pte_size - 1));
1493                         if (pte_size == HW_PAGE_SIZE64KB)
1494                                 numof4k_pages = 16;
1495                         else
1496                                 numof4k_pages = 1;
1497                         temp = 0;
1498                         while (temp++ < numof4k_pages) {
1499                                 if (!pfn_valid(__phys_to_pfn(paddr))) {
1500                                         paddr += HW_PAGE_SIZE4KB;
1501                                         continue;
1502                                 }
1503                                 pg = PHYS_TO_PAGE(paddr);
1504                                 if (page_count(pg) < 1) {
1505                                         pr_info("DSPBRIDGE: UNMAP function: "
1506                                                 "COUNT 0 FOR PA 0x%x, size = "
1507                                                 "0x%x\n", paddr, ul_num_bytes);
1508                                         bad_page_dump(paddr, pg);
1509                                 } else {
1510                                         set_page_dirty(pg);
1511                                         page_cache_release(pg);
1512                                 }
1513                                 paddr += HW_PAGE_SIZE4KB;
1514                         }
1515                         if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)
1516                             == RET_FAIL) {
1517                                 status = -EPERM;
1518                                 goto EXIT_LOOP;
1519                         }
1520
1521                         status = 0;
1522                         rem_bytes_l2 -= pte_size;
1523                         va_curr += pte_size;
1524                         pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1525                 }
1526                 spin_lock(&pt->pg_lock);
1527                 if (rem_bytes_l2 == 0) {
1528                         pt->pg_info[l2_page_num].num_entries -= pte_count;
1529                         if (pt->pg_info[l2_page_num].num_entries == 0) {
1530                                 /*
1531                                  * Clear the L1 PTE pointing to the L2 PT
1532                                  */
1533                                 if (hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1534                                                      HW_MMU_COARSE_PAGE_SIZE) ==
1535                                     RET_OK)
1536                                         status = 0;
1537                                 else {
1538                                         status = -EPERM;
1539                                         spin_unlock(&pt->pg_lock);
1540                                         goto EXIT_LOOP;
1541                                 }
1542                         }
1543                         rem_bytes -= pte_count * PG_SIZE4K;
1544                 } else
1545                         status = -EPERM;
1546
1547                 spin_unlock(&pt->pg_lock);
1548                 continue;
1549 skip_coarse_page:
1550                 /* va_curr aligned to pte_size? */
1551                 /* pte_size = 1 MB or 16 MB */
1552                 if (pte_size == 0 || rem_bytes < pte_size ||
1553                     va_curr & (pte_size - 1)) {
1554                         status = -EPERM;
1555                         break;
1556                 }
1557
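                /*
                 * Here pte_size is either a 1 MB section or a 16 MB
                 * supersection, i.e. 256 or 4096 4 KB pages whose struct
                 * page references are released below.
                 */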
1558                 if (pte_size == HW_PAGE_SIZE1MB)
1559                         numof4k_pages = 256;
1560                 else
1561                         numof4k_pages = 4096;
1562                 temp = 0;
1563                 /* Collect Physical addresses from VA */
1564                 paddr = (pte_val & ~(pte_size - 1));
1565                 while (temp++ < numof4k_pages) {
1566                         if (pfn_valid(__phys_to_pfn(paddr))) {
1567                                 pg = PHYS_TO_PAGE(paddr);
1568                                 if (page_count(pg) < 1) {
1569                                         pr_info("DSPBRIDGE: UNMAP function: "
1570                                                 "COUNT 0 FOR PA 0x%x, size = "
1571                                                 "0x%x\n", paddr, ul_num_bytes);
1572                                         bad_page_dump(paddr, pg);
1573                                 } else {
1574                                         set_page_dirty(pg);
1575                                         page_cache_release(pg);
1576                                 }
1577                         }
1578                         paddr += HW_PAGE_SIZE4KB;
1579                 }
1580                 if (hw_mmu_pte_clear(l1_base_va, va_curr, pte_size) == RET_OK) {
1581                         status = 0;
1582                         rem_bytes -= pte_size;
1583                         va_curr += pte_size;
1584                 } else {
1585                         status = -EPERM;
1586                         goto EXIT_LOOP;
1587                 }
1588         }
1589         /*
1590          * It is better to flush the TLB here, so that any stale entries
1591          * are flushed
1592          */
1593 EXIT_LOOP:
1594         flush_all(dev_context);
1595         dev_dbg(bridge,
1596                 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1597                 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1598                 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1599         return status;
1600 }
1601
1602 /*
1603  *  ======== user_va2_pa ========
1604  *  Purpose:
1605  *      This function walks through the page tables to convert a userland
1606  *      virtual address to physical address
1607  */
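/*
 * A plain pgd -> pmd -> pte walk; the caller (bridge_brd_mem_map) holds
 * mm->mmap_sem for reading while this runs.  Returns 0 when no valid
 * translation exists for the address.
 */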
1608 static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1609 {
1610         pgd_t *pgd;
1611         pmd_t *pmd;
1612         pte_t *ptep, pte;
1613
1614         pgd = pgd_offset(mm, address);
1615         if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1616                 pmd = pmd_offset(pgd, address);
1617                 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1618                         ptep = pte_offset_map(pmd, address);
1619                         if (ptep) {
1620                                 pte = *ptep;
1621                                 if (pte_present(pte))
1622                                         return pte & PAGE_MASK;
1623                         }
1624                 }
1625         }
1626
1627         return 0;
1628 }
1629
1630 /*
1631  *  ======== pte_update ========
1632  *      This function calculates the optimum page-aligned addresses and sizes
1633  *      Caller must pass page-aligned values
1634  */
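/*
 * Example with hypothetical addresses: mapping pa 0x87f10000 to va 0x11000000
 * with size 0x30000 fails the 16 MB and 1 MB alignment tests but passes the
 * 64 KB one (pa | va has its low 16 bits clear), so three 64 KB PTEs are set
 * instead of forty-eight 4 KB ones.
 */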
1635 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1636                              u32 va, u32 size,
1637                              struct hw_mmu_map_attrs_t *map_attrs)
1638 {
1639         u32 i;
1640         u32 all_bits;
1641         u32 pa_curr = pa;
1642         u32 va_curr = va;
1643         u32 num_bytes = size;
1644         struct bridge_dev_context *dev_context = dev_ctxt;
1645         int status = 0;
1646         u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1647                 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1648         };
1649
1650         while (num_bytes && DSP_SUCCEEDED(status)) {
1651                 /* To find the max. page size with which both PA & VA are
1652                  * aligned */
1653                 all_bits = pa_curr | va_curr;
1654
1655                 for (i = 0; i < 4; i++) {
1656                         if ((num_bytes >= page_size[i]) && ((all_bits &
1657                                                              (page_size[i] -
1658                                                               1)) == 0)) {
1659                                 status =
1660                                     pte_set(dev_context->pt_attrs, pa_curr,
1661                                             va_curr, page_size[i], map_attrs);
1662                                 pa_curr += page_size[i];
1663                                 va_curr += page_size[i];
1664                                 num_bytes -= page_size[i];
1665                                 /* Don't try smaller sizes. Hopefully we have
1666                                  * reached an address aligned to a bigger page
1667                                  * size */
1668                                 break;
1669                         }
1670                 }
1671         }
1672
1673         return status;
1674 }
1675
1676 /*
1677  *  ======== pte_set ========
1678  *      This function calculates PTE address (MPU virtual) to be updated
1679  *      It also manages the L2 page tables
1680  */
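/*
 * For 4 KB and 64 KB mappings the PTE lives in an L2 (coarse) table: an
 * existing table is reused when the L1 entry already points at one, otherwise
 * a free table is taken from the pool in pt_attrs.  The per-table num_entries
 * count (16 for a 64 KB page, presumably because the coarse-table format
 * repeats a large-page descriptor across 16 consecutive slots, 1 for a 4 KB
 * page) lets bridge_brd_mem_un_map() clear the L1 entry once a table empties.
 */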
1681 static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1682                           u32 size, struct hw_mmu_map_attrs_t *attrs)
1683 {
1684         u32 i;
1685         u32 pte_val;
1686         u32 pte_addr_l1;
1687         u32 pte_size;
1688         /* Base address of the PT that will be updated */
1689         u32 pg_tbl_va;
1690         u32 l1_base_va;
1691         /* The compiler warns that the next three variables might be used
1692          * uninitialized in this function. That doesn't seem to be the case,
1693          * but initialize them anyway to work around the warning. */
1694         u32 l2_base_va = 0;
1695         u32 l2_base_pa = 0;
1696         u32 l2_page_num = 0;
1697         int status = 0;
1698
1699         l1_base_va = pt->l1_base_va;
1700         pg_tbl_va = l1_base_va;
1701         if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1702                 /* Find whether the L1 PTE points to a valid L2 PT */
1703                 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1704                 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1705                         pte_val = *(u32 *) pte_addr_l1;
1706                         pte_size = hw_mmu_pte_size_l1(pte_val);
1707                 } else {
1708                         return -EPERM;
1709                 }
1710                 spin_lock(&pt->pg_lock);
1711                 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1712                         /* Get the L2 PA from the L1 PTE, and find
1713                          * corresponding L2 VA */
1714                         l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1715                         l2_base_va =
1716                             l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1717                         l2_page_num =
1718                             (l2_base_pa -
1719                              pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1720                 } else if (pte_size == 0) {
1721                         /* L1 PTE is invalid. Allocate a L2 PT and
1722                          * point the L1 PTE to it */
1723                         /* Find a free L2 PT. */
1724                         for (i = 0; (i < pt->l2_num_pages) &&
1725                              (pt->pg_info[i].num_entries != 0); i++)
1726                              ;
1727                         if (i < pt->l2_num_pages) {
1728                                 l2_page_num = i;
1729                                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1730                                                 HW_MMU_COARSE_PAGE_SIZE);
1731                                 l2_base_va = pt->l2_base_va + (l2_page_num *
1732                                                 HW_MMU_COARSE_PAGE_SIZE);
1733                                 /* Endianness attributes are ignored for
1734                                  * HW_MMU_COARSE_PAGE_SIZE */
1735                                 status =
1736                                     hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1737                                                    HW_MMU_COARSE_PAGE_SIZE,
1738                                                    attrs);
1739                         } else {
1740                                 status = -ENOMEM;
1741                         }
1742                 } else {
1743                         /* Found valid L1 PTE of another size.
1744                          * Should not overwrite it. */
1745                         status = -EPERM;
1746                 }
1747                 if (DSP_SUCCEEDED(status)) {
1748                         pg_tbl_va = l2_base_va;
1749                         if (size == HW_PAGE_SIZE64KB)
1750                                 pt->pg_info[l2_page_num].num_entries += 16;
1751                         else
1752                                 pt->pg_info[l2_page_num].num_entries++;
1753                         dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1754                                 "%x, num_entries %x\n", l2_base_va,
1755                                 l2_base_pa, l2_page_num,
1756                                 pt->pg_info[l2_page_num].num_entries);
1757                 }
1758                 spin_unlock(&pt->pg_lock);
1759         }
1760         if (DSP_SUCCEEDED(status)) {
1761                 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1762                         pg_tbl_va, pa, va, size);
1763                 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1764                         "mixed_size %x\n", attrs->endianism,
1765                         attrs->element_size, attrs->mixed_size);
1766                 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1767         }
1768
1769         return status;
1770 }
1771
1772 /* Memory map kernel VA -- memory allocated with vmalloc */
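/*
 * vmalloc memory is virtually contiguous but physically scattered, so each
 * page is translated with vmalloc_to_page(); runs of physically contiguous
 * pages are merged into a single pte_update() call so larger DSP MMU entries
 * can be used where the alignment allows.
 */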
1773 static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1774                                   u32 ul_mpu_addr, u32 virt_addr,
1775                                   u32 ul_num_bytes,
1776                                   struct hw_mmu_map_attrs_t *hw_attrs)
1777 {
1778         int status = 0;
1779         struct page *page[1];
1780         u32 i;
1781         u32 pa_curr;
1782         u32 pa_next;
1783         u32 va_curr;
1784         u32 size_curr;
1785         u32 num_pages;
1786         u32 pa;
1787         u32 num_of4k_pages;
1788         u32 temp = 0;
1789
1790         /*
1791          * Do Kernel va to pa translation.
1792          * Combine physically contiguous regions to reduce TLBs.
1793          * Pass the translated pa to pte_update.
1794          */
1795         num_pages = ul_num_bytes / PAGE_SIZE;   /* PAGE_SIZE = OS page size */
1796         i = 0;
1797         va_curr = ul_mpu_addr;
1798         page[0] = vmalloc_to_page((void *)va_curr);
1799         pa_next = page_to_phys(page[0]);
1800         while (DSP_SUCCEEDED(status) && (i < num_pages)) {
1801                 /*
1802                  * Reuse pa_next from the previous iteration to avoid
1803                  * an extra va2pa call
1804                  */
1805                 pa_curr = pa_next;
1806                 size_curr = PAGE_SIZE;
1807                 /*
1808                  * If the next page is physically contiguous,
1809                  * map it with the current one by increasing
1810                  * the size of the region to be mapped
1811                  */
1812                 while (++i < num_pages) {
1813                         page[0] =
1814                             vmalloc_to_page((void *)(va_curr + size_curr));
1815                         pa_next = page_to_phys(page[0]);
1816
1817                         if (pa_next == (pa_curr + size_curr))
1818                                 size_curr += PAGE_SIZE;
1819                         else
1820                                 break;
1821
1822                 }
1823                 if (pa_next == 0) {
1824                         status = -ENOMEM;
1825                         break;
1826                 }
1827                 pa = pa_curr;
1828                 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1829                 for (temp = 0; temp < num_of4k_pages; temp++) {
1830                         get_page(PHYS_TO_PAGE(pa));
1831                         pa += HW_PAGE_SIZE4KB;
1832                 }
1833                 status = pte_update(dev_context, pa_curr, virt_addr +
1834                                     (va_curr - ul_mpu_addr), size_curr,
1835                                     hw_attrs);
1836                 va_curr += size_curr;
1837         }
1838         if (DSP_SUCCEEDED(status))
1839                 status = 0;
1840         else
1841                 status = -EPERM;
1842
1843         /*
1844          * In any case, flush the TLB
1845          * This is done here instead of in pte_update to avoid unnecessary
1846          * repetition while mapping non-contiguous physical regions of a virtual
1847          * region
1848          */
1849         flush_all(dev_context);
1850         dev_dbg(bridge, "%s status %x\n", __func__, status);
1851         return status;
1852 }
1853
1854 /*
1855  *  ======== wait_for_start ========
1856  *      Wait for the signal from the DSP that it has started, or time out.
1857  */
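/*
 * The DSP signals start-up by clearing the 16-bit sync word at dw_sync_addr;
 * the loop below polls it in 10 us steps, for a worst case of roughly
 * TIHELEN_ACKTIMEOUT * 10 us (about 100 ms) before giving up.
 */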
1858 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
1859 {
1860         u16 timeout = TIHELEN_ACKTIMEOUT;
1861
1862         /*  Wait for response from board */
1863         while (*((volatile u16 *)dw_sync_addr) && --timeout)
1864                 udelay(10);
1865
1866         /*  If timed out: return false */
1867         if (!timeout) {
1868                 pr_err("%s: Timed out waiting for DSP to start\n", __func__);
1869                 return false;
1870         }
1871         return true;
1872 }