/* Cypress WestBridge OMAP3430 Kernel Hal source file (cyashalomap_kernel.c)
## ===========================
## Copyright (C) 2010  Cypress Semiconductor
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor,
## Boston, MA  02110-1301, USA.
## ===========================
*/

#ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL

#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/sched.h>
/* these includes seem broken; moved here for patch submission
 * #include <mach/mux.h>
 * #include <mach/gpmc.h>
 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h>
 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h>
 * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h>
 * #include <linux/westbridge/cyaserr.h>
 * #include <linux/westbridge/cyasregs.h>
 * #include <linux/westbridge/cyasdma.h>
 * #include <linux/westbridge/cyasintr.h>
 */
#include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h"
#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h"
#include "../../../include/linux/westbridge/cyaserr.h"
#include "../../../include/linux/westbridge/cyasregs.h"
#include "../../../include/linux/westbridge/cyasdma.h"
#include "../../../include/linux/westbridge/cyasintr.h"

#define HAL_REV "1.1.0"

/*
 * uncomment to enable 16bit pnand interface
 */
#define PNAND_16BIT_MODE

/*
 * selects one of 3 versions of pnand_lbd_read()
 * PNAND_LBD_READ_NO_PFE - original 8/16 bit code,
 *    reads through the GPMC controller registers
 * ENABLE_GPMC_PF_ENGINE - uses GPMC PFE FIFO reads in 8 bit mode,
 *    same speed as the above
 * PFE_LBD_READ_V2 - slightly different, performance same as above
 */
#define PNAND_LBD_READ_NO_PFE
/* #define ENABLE_GPMC_PF_ENGINE */
/* #define  PFE_LBD_READ_V2 */

/*
 * westbridge astoria ISR option to limit the number of
 * back-to-back DMA transfers per ISR interrupt
 */
#define MAX_DRQ_LOOPS_IN_ISR 4

/*
 * debug prints enabling
 *#define DBGPRN_ENABLED
 *#define DBGPRN_DMA_SETUP_RD
 *#define DBGPRN_DMA_SETUP_WR
 */


/*
 * For performance reasons, we handle storage endpoint transfers up to 4 KB
 * within the HAL itself.
 */
#define CYASSTORAGE_WRITE_EP_NUM       (4)
#define CYASSTORAGE_READ_EP_NUM        (8)

/*
 *  size of DMA packet HAL can accept from Storage API
 *  HAL will fragment it into smaller chunks that the P port can accept
 */
#define CYASSTORAGE_MAX_XFER_SIZE       (2*32768)

/*
 *  P port MAX DMA packet size according to interface/ep configuration
 */
#define HAL_DMA_PKT_SZ 512

#define is_storage_e_p(ep) (((ep) == 2) || ((ep) == 4) || \
                                ((ep) == 6) || ((ep) == 8))
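
/*
 * Illustrative sketch (not part of the driver, for clarity only):
 * how a Storage API request of up to CYASSTORAGE_MAX_XFER_SIZE bytes
 * fragments into HAL_DMA_PKT_SZ-sized P-port packets with a possible
 * short tail, as done by prep_for_next_xfer() below.
 */
#if 0
static inline u16 hal_dma_pkt_count(u32 req_len)
{
        /* full packets, plus one short packet for any remainder */
        return (u16)(req_len / HAL_DMA_PKT_SZ) +
                        ((req_len % HAL_DMA_PKT_SZ) ? 1 : 0);
}
#endif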
109
110 /*
111  * persistant, stores current GPMC interface cfg mode
112  */
113 static uint8_t pnand_16bit;
114
115 /*
116  * keep processing new WB DRQ in ISR untill all handled (performance feature)
117  */
118 #define PROCESS_MULTIPLE_DRQ_IN_ISR (1)
119
120
121 /*
122  * ASTORIA PNAND IF COMMANDS, CASDO - READ, CASDI - WRITE
123  */
124 #define CASDO 0x05
125 #define CASDI 0x85
126 #define RDPAGE_B1   0x00
127 #define RDPAGE_B2   0x30
128 #define PGMPAGE_B1  0x80
129 #define PGMPAGE_B2  0x10
130
131 /*
132  * The type of DMA operation, per endpoint
133  */
134 typedef enum cy_as_hal_dma_type {
135         cy_as_hal_read,
136         cy_as_hal_write,
137         cy_as_hal_none
138 } cy_as_hal_dma_type;
139
140
141 /*
142  * SG list halpers defined in scaterlist.h
143 #define sg_is_chain(sg)         ((sg)->page_link & 0x01)
144 #define sg_is_last(sg)          ((sg)->page_link & 0x02)
145 #define sg_chain_ptr(sg)        \
146         ((struct scatterlist *) ((sg)->page_link & ~0x03))
147 */
148 typedef struct cy_as_hal_endpoint_dma {
149         cy_bool buffer_valid;
150         uint8_t *data_p;
151         uint32_t size;
152         /*
153          * sg_list_enabled - if true use, r/w DMA transfers use sg list,
154          *              FALSE use pointer to a buffer
155          * sg_p - pointer to the owner's sg list, of there is such
156          *              (like blockdriver)
157          * dma_xfer_sz - size of the next dma xfer on P port
158          * seg_xfer_cnt -  counts xfered bytes for in current sg_list
159          *              memory segment
160          * req_xfer_cnt - total number of bytes transfered so far in
161          *              current request
162          * req_length - total request length
163          */
164         bool sg_list_enabled;
165         struct scatterlist *sg_p;
166         uint16_t dma_xfer_sz;
167         uint32_t seg_xfer_cnt;
168         uint16_t req_xfer_cnt;
169         uint16_t req_length;
170         cy_as_hal_dma_type type;
171         cy_bool pending;
172 } cy_as_hal_endpoint_dma;
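
/*
 * Field lifecycle sketch (illustration only): for an sg-backed read
 * request the HAL sets sg_p/data_p from the owner's sg list, then per
 * DMA packet advances seg_xfer_cnt and req_xfer_cnt by dma_xfer_sz,
 * moving to sg_next() once seg_xfer_cnt reaches sg_p->length, until
 * req_xfer_cnt == req_length (see prep_for_next_xfer() below).
 */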
173
174 /*
175  * The list of OMAP devices (should be one)
176  */
177 static cy_as_omap_dev_kernel *m_omap_list_p;
178
179 /*
180  * The callback to call after DMA operations are complete
181  */
182 static cy_as_hal_dma_complete_callback callback;
183
184 /*
185  * Pending data size for the endpoints
186  */
187 static cy_as_hal_endpoint_dma end_points[16];
188
189 /*
190  * Forward declaration
191  */
192 static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p);
193
194 static uint16_t intr_sequence_num;
195 static uint8_t intr__enable;
196 spinlock_t int_lock;
197
198 static u32 iomux_vma;
199 static u32 csa_phy;
200
201 /*
202  * gpmc I/O registers VMA
203  */
204 static u32 gpmc_base;
205
206 /*
207  * gpmc data VMA associated with CS4 (ASTORIA CS on GPMC)
208  */
209 static u32 gpmc_data_vma;
210 static u32 ndata_reg_vma;
211 static u32 ncmd_reg_vma;
212 static u32 naddr_reg_vma;
213
214 /*
215  * fwd declarations
216  */
217 static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff);
218 static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff);
219 static inline u16 __attribute__((always_inline))
220                         ast_p_nand_casdo_read(u8 reg_addr8);
221 static inline void __attribute__((always_inline))
222                         ast_p_nand_casdi_write(u8 reg_addr8, u16 data);
223
224 /*
225  * prints given number of omap registers
226  */
227 static void cy_as_hal_print_omap_regs(char *name_prefix,
228                                 u8 name_base, u32 virt_base, u16 count)
229 {
230         u32 reg_val, reg_addr;
231         u16 i;
232         cy_as_hal_print_message(KERN_INFO "\n");
233         for (i = 0; i < count; i++) {
234
235                 reg_addr = virt_base + (i*4);
236                 /* use virtual addresses here*/
237                 reg_val = __raw_readl(reg_addr);
238                 cy_as_hal_print_message(KERN_INFO "%s_%d[%8.8x]=%8.8x\n",
239                                                 name_prefix, name_base+i,
240                                                 reg_addr, reg_val);
241         }
242 }
243
244 /*
245  * setMUX function for a pad + additional pad flags
246  */
247 static u16 omap_cfg_reg_L(u32 pad_func_index)
248 {
249         static u8 sanity_check = 1;
250
251         u32 reg_vma;
252         u16 cur_val, wr_val, rdback_val;
253
254         /*
255          * do sanity check on the omap_mux_pin_cfg[] table
256          */
257         cy_as_hal_print_message(KERN_INFO" OMAP pins user_pad cfg ");
258         if (sanity_check) {
259                 if ((omap_mux_pin_cfg[END_OF_TABLE].name[0] == 'E') &&
260                         (omap_mux_pin_cfg[END_OF_TABLE].name[1] == 'N') &&
261                         (omap_mux_pin_cfg[END_OF_TABLE].name[2] == 'D')) {
262
263                         cy_as_hal_print_message(KERN_INFO
264                                         "table is good.\n");
265                 } else {
266                         cy_as_hal_print_message(KERN_WARNING
267                                         "table is bad, fix it");
268                 }
269                 /*
270                  * do it only once
271                  */
272                 sanity_check = 0;
273         }
274
275         /*
276          * get virtual address to the PADCNF_REG
277          */
278         reg_vma = (u32)iomux_vma + omap_mux_pin_cfg[pad_func_index].offset;
279
280         /*
281          * add additional USER PU/PD/EN flags
282          */
283         wr_val = omap_mux_pin_cfg[pad_func_index].mux_val;
284         cur_val = IORD16(reg_vma);
285
286         /*
287          * PADCFG regs 16 bit long, packed into 32 bit regs,
288          * can also be accessed as u16
289          */
290         IOWR16(reg_vma, wr_val);
291         rdback_val = IORD16(reg_vma);
292
293         /*
294          * in case if the caller wants to save the old value
295          */
296         return wr_val;
297 }
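
/*
 * Usage sketch (assuming a valid index into omap_mux_pin_cfg[]):
 *
 *      u16 val = omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
 *
 * note that the function returns the value written (wr_val); the
 * previous pad value is read into cur_val but not returned.
 */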

#define BLKSZ_4K 0x1000

/*
 * switch GPMC DATA bus mode
 */
void cy_as_hal_gpmc_enable_16bit_bus(bool dbus16_enabled)
{
        uint32_t tmp32;

        /*
         * disable gpmc CS4 operation 1st
         */
        tmp32 = gpmc_cs_read_reg(AST_GPMC_CS,
                                GPMC_CS_CONFIG7) & ~GPMC_CONFIG7_CSVALID;
        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);

        /*
         * GPMC NAND data bus can be 8 or 16 bit wide
         */
        if (dbus16_enabled) {
                DBGPRN("enabling 16 bit bus\n");
                gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
                                (GPMC_CONFIG1_DEVICETYPE(2) |
                                GPMC_CONFIG1_WAIT_PIN_SEL(2) |
                                GPMC_CONFIG1_DEVICESIZE_16)
                                );
        } else {
                DBGPRN(KERN_INFO "enabling 8 bit bus\n");
                gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
                                (GPMC_CONFIG1_DEVICETYPE(2) |
                                GPMC_CONFIG1_WAIT_PIN_SEL(2))
                                );
        }

        /*
         * re-enable astoria CS operation on GPMC
         */
        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
                        (tmp32 | GPMC_CONFIG7_CSVALID));

        /*
         * remember the state
         */
        pnand_16bit = dbus16_enabled;
}
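
/*
 * Usage sketch: the GPMC bus width must match astoria's pnand
 * configuration; e.g. after switching astoria to 16 bit pnand mode:
 *
 *      cy_as_hal_gpmc_enable_16bit_bus(true);
 *
 * subsequent p_nand_lbd_read()/p_nand_lbd_write() calls then take
 * the 16 bit paths keyed off the pnand_16bit flag above.
 */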

static int cy_as_hal_gpmc_init(void)
{
        u32 tmp32;
        int err;
        struct gpmc_timings     timings;

        gpmc_base = (u32)ioremap_nocache(OMAP34XX_GPMC_BASE, BLKSZ_4K);
        DBGPRN(KERN_INFO "kernel has gpmc_base=%x , val@ the base=%x",
                gpmc_base, __raw_readl(gpmc_base)
        );

        /*
         * these globals are full VMAs derived from gpmc_base above
         */
        ncmd_reg_vma = GPMC_VMA(AST_GPMC_NAND_CMD);
        naddr_reg_vma = GPMC_VMA(AST_GPMC_NAND_ADDR);
        ndata_reg_vma = GPMC_VMA(AST_GPMC_NAND_DATA);

        /*
         * request GPMC CS for ASTORIA
         */
        if (gpmc_cs_request(AST_GPMC_CS, SZ_16M, (void *)&csa_phy) < 0) {
                cy_as_hal_print_message(KERN_ERR "error failed to request "
                                        "ncs4 for ASTORIA\n");
                        return -1;
        } else {
                DBGPRN(KERN_INFO "got phy_addr:%x for "
                                "GPMC CS%d GPMC_CFGREG7[CS4]\n",
                                 csa_phy, AST_GPMC_CS);
        }

        /*
         * request a VM region of 4K addr space for the chip select 4 phy
         * address; technically we don't need it for NAND devices, but do it
         * anyway so that data read/write bus cycles can be triggered by
         * reading or writing this mem region
         */
        if (!request_mem_region(csa_phy, BLKSZ_4K, "AST_OMAP_HAL")) {
                err = -EBUSY;
                cy_as_hal_print_message(KERN_ERR "error MEM region "
                                        "request for phy_addr:%x failed\n",
                                        csa_phy);
                        goto out_free_cs;
        }

        /*
         * REMAP mem region associated with our CS
         */
        gpmc_data_vma = (u32)ioremap_nocache(csa_phy, BLKSZ_4K);
        if (!gpmc_data_vma) {
                err = -ENOMEM;
                cy_as_hal_print_message(KERN_ERR "error- ioremap() "
                                        "for phy_addr:%x failed", csa_phy);

                goto out_release_mem_region;
        }
        cy_as_hal_print_message(KERN_INFO "ioremap(%x) returned vma=%x\n",
                                                        csa_phy, gpmc_data_vma);

        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
                                                (GPMC_CONFIG1_DEVICETYPE(2) |
                                                GPMC_CONFIG1_WAIT_PIN_SEL(2)));

        memset(&timings, 0, sizeof(timings));

        /* cs timing */
        timings.cs_on = WB_GPMC_CS_t_o_n;
        timings.cs_wr_off = WB_GPMC_BUSCYC_t;
        timings.cs_rd_off = WB_GPMC_BUSCYC_t;

        /* adv timing */
        timings.adv_on = WB_GPMC_ADV_t_o_n;
        timings.adv_rd_off = WB_GPMC_BUSCYC_t;
        timings.adv_wr_off = WB_GPMC_BUSCYC_t;

        /* oe timing */
        timings.oe_on = WB_GPMC_OE_t_o_n;
        timings.oe_off = WB_GPMC_OE_t_o_f_f;
        timings.access = WB_GPMC_RD_t_a_c_c;
        timings.rd_cycle = WB_GPMC_BUSCYC_t;

        /* we timing */
        timings.we_on = WB_GPMC_WE_t_o_n;
        timings.we_off = WB_GPMC_WE_t_o_f_f;
        timings.wr_access = WB_GPMC_WR_t_a_c_c;
        timings.wr_cycle = WB_GPMC_BUSCYC_t;

        timings.page_burst_access = WB_GPMC_BUSCYC_t;
        timings.wr_data_mux_bus = WB_GPMC_BUSCYC_t;
        gpmc_cs_set_timings(AST_GPMC_CS, &timings);

        cy_as_hal_print_omap_regs("GPMC_CONFIG", 1,
                        GPMC_VMA(GPMC_CFG_REG(1, AST_GPMC_CS)), 7);

        /*
         * DISABLE cs4; NOTE: GPMC REG7 is already configured
         * at this point by gpmc_cs_request
         */
        tmp32 = gpmc_cs_read_reg(AST_GPMC_CS, GPMC_CS_CONFIG7) &
                                                ~GPMC_CONFIG7_CSVALID;
        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);

        /*
         * PROGRAM chip select Region (see OMAP3430 TRM PAGE 1088)
         */
        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
                                        (AS_CS_MASK | AS_CS_BADDR));

        /*
         * by default configure GPMC into 8 bit mode
         * (to match astoria default mode)
         */
        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
                                        (GPMC_CONFIG1_DEVICETYPE(2) |
                                        GPMC_CONFIG1_WAIT_PIN_SEL(2)));

        /*
         * ENABLE astoria cs operation on GPMC
         */
        gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
                                        (tmp32 | GPMC_CONFIG7_CSVALID));

        /*
         * no method currently exists to write this register through the
         * GPMC APIs; we need to change the WAIT2 polarity
         */
        tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
        tmp32 = tmp32 | NAND_FORCE_POSTED_WRITE_B | 0x40;
        IOWR32(GPMC_VMA(GPMC_CONFIG_REG), tmp32);

        tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
        cy_as_hal_print_message("GPMC_CONFIG_REG=0x%x\n", tmp32);

        return 0;

out_release_mem_region:
        release_mem_region(csa_phy, BLKSZ_4K);

out_free_cs:
        gpmc_cs_free(AST_GPMC_CS);

        return err;
}

/*
 * west bridge astoria ISR (interrupt handler)
 */
static irqreturn_t cy_astoria_int_handler(int irq,
                                void *dev_id, struct pt_regs *regs)
{
        cy_as_omap_dev_kernel *dev_p;
        uint16_t                  read_val = 0;
        uint16_t                  mask_val = 0;

        /*
         * debug stuff, counts the number of loops per one intr trigger
         */
        uint16_t                  drq_loop_cnt = 0;
        uint8_t            irq_pin;
        /*
         * flags to watch
         */
        const uint16_t  sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
                                CY_AS_MEM_P0_INTR_REG_MBINT |
                                CY_AS_MEM_P0_INTR_REG_PMINT |
                                CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);

        /*
         * sample IRQ pin level (just for statistics)
         */
        irq_pin = __gpio_get_value(AST_INT);

        /*
         * this one is just for debugging
         */
        intr_sequence_num++;

        /*
         * astoria device handle
         */
        dev_p = dev_id;

        /*
         * read Astoria intr register
         */
        read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
                                                CY_AS_MEM_P0_INTR_REG);

        /*
         * save current mask value
         */
        mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
                                                CY_AS_MEM_P0_INT_MASK_REG);

        DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
                        intr_sequence_num, read_val);

        /*
         * disable WB interrupt signal generation while we are in the ISR
         */
        cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
                                        CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

        /*
         * this is a DRQ interrupt
         */
        if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {

                do {
                        /*
                         * handle DRQ interrupt
                         */
                        drq_loop_cnt++;

                        cy_handle_d_r_q_interrupt(dev_p);

                        /*
                         * spending too much time in the ISR may impact
                         * average system performance
                         */
                        if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
                                break;

                /*
                 * keep processing if there is another DRQ int flag
                 */
                } while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
                                        CY_AS_MEM_P0_INTR_REG) &
                                        CY_AS_MEM_P0_INTR_REG_DRQINT);
        }

        if (read_val & sentinel)
                cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);

        DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
                        "int_pin:%d DRQ_jobs:%d\n",
                        intr_sequence_num,
                        mask_val,
                        irq_pin,
                        drq_loop_cnt);

        /*
         * re-enable WB hw interrupts
         */
        cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
                                        CY_AS_MEM_P0_INT_MASK_REG, mask_val);

        return IRQ_HANDLED;
}
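
/*
 * ISR pattern sketch: astoria interrupt generation is masked on entry
 * (INT_MASK_REG = 0), up to MAX_DRQ_LOOPS_IN_ISR DRQ events are then
 * serviced back-to-back, and the saved mask is restored on exit; the
 * level-low GPIO line will retrigger the ISR if work remains.
 */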

static int cy_as_hal_configure_interrupts(void *dev_p)
{
        int result;
        int irq_pin  = AST_INT;

        irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);

        /*
         * for shared IRQs we must provide a non NULL device ptr,
         * otherwise the int won't register
         */
        result = request_irq(OMAP_GPIO_IRQ(irq_pin),
                                        (irq_handler_t)cy_astoria_int_handler,
                                        IRQF_SHARED, "AST_INT#", dev_p);

        if (result == 0) {
                /*
                 * OMAP_GPIO_IRQ(irq_pin) - omap logical IRQ number
                 *              assigned to this interrupt
                 * OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1) - print status
                 *              of AST_INT GPIO IRQ_ENABLE FLAG
                 */
                cy_as_hal_print_message(KERN_INFO"AST_INT omap_pin:"
                                "%d assigned IRQ #%d IRQEN1=%d\n",
                                irq_pin,
                                OMAP_GPIO_IRQ(irq_pin),
                                OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1)
                                );
        } else {
                cy_as_hal_print_message("cyasomaphal: interrupt "
                                "failed to register\n");
                gpio_free(irq_pin);
                cy_as_hal_print_message(KERN_WARNING
                                "ASTORIA: can't get assigned IRQ "
                                "%i for INT#\n", OMAP_GPIO_IRQ(irq_pin));
        }

        return result;
}

/*
 * initialize OMAP pads/pins to user defined functions
 */
static void cy_as_hal_init_user_pads(user_pad_cfg_t *pad_cfg_tab)
{
        /*
         * browse through the table and initialize the pins
         */
        u32 in_level = 0;
        u16 tmp16, mux_val;

        while (pad_cfg_tab->name != NULL) {

                if (gpio_request(pad_cfg_tab->pin_num, NULL) == 0) {

                        pad_cfg_tab->valid = 1;
                        mux_val = omap_cfg_reg_L(pad_cfg_tab->mux_func);

                        /*
                         * always set drv level before changing out direction
                         */
                        __gpio_set_value(pad_cfg_tab->pin_num,
                                                        pad_cfg_tab->drv);

                        /*
                         * "0" - OUT, "1" - IN; omap_set_gpio_direction
                         * (pad_cfg_tab->pin_num, pad_cfg_tab->dir);
                         */
                        if (pad_cfg_tab->dir)
                                gpio_direction_input(pad_cfg_tab->pin_num);
                        else
                                gpio_direction_output(pad_cfg_tab->pin_num,
                                                        pad_cfg_tab->drv);

                        /*  sample the pin  */
                        in_level = __gpio_get_value(pad_cfg_tab->pin_num);

                        cy_as_hal_print_message(KERN_INFO "configured %s to "
                                        "OMAP pad_%d, DIR=%d "
                                        "DOUT=%d, DIN=%d\n",
                                        pad_cfg_tab->name,
                                        pad_cfg_tab->pin_num,
                                        pad_cfg_tab->dir,
                                        pad_cfg_tab->drv,
                                        in_level
                        );
                } else {
                        /*
                         * get the pad_mux value to check on the pin_function
                         */
                        cy_as_hal_print_message(KERN_INFO "couldn't cfg pin %d "
                                        "for signal %s, it's already taken\n",
                                        pad_cfg_tab->pin_num,
                                        pad_cfg_tab->name);
                }

                tmp16 = *(u16 *)PADCFG_VMA
                        (omap_mux_pin_cfg[pad_cfg_tab->mux_func].offset);

                cy_as_hal_print_message(KERN_INFO "GPIO_%d(PAD_CFG=%x,OE=%d"
                        "DOUT=%d, DIN=%d IRQEN=%d)\n\n",
                        pad_cfg_tab->pin_num, tmp16,
                        OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_OE),
                        OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_OUT),
                        OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_IN),
                        OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_IRQENABLE1)
                        );

                /*
                 * next pad_cfg descriptor
                 */
                pad_cfg_tab++;
        }

        cy_as_hal_print_message(KERN_INFO"pads configured\n");
}


/*
 * release gpios taken by the module
 */
static void cy_as_hal_release_user_pads(user_pad_cfg_t *pad_cfg_tab)
{
        while (pad_cfg_tab->name != NULL) {

                if (pad_cfg_tab->valid) {
                        gpio_free(pad_cfg_tab->pin_num);
                        pad_cfg_tab->valid = 0;
                        cy_as_hal_print_message(KERN_INFO "GPIO_%d "
                                        "released from %s\n",
                                        pad_cfg_tab->pin_num,
                                        pad_cfg_tab->name);
                } else {
                        cy_as_hal_print_message(KERN_INFO "no release "
                                        "for %s, GPIO_%d, wasn't acquired\n",
                                        pad_cfg_tab->name,
                                        pad_cfg_tab->pin_num);
                }
                pad_cfg_tab++;
        }
}

void cy_as_hal_config_c_s_mux(void)
{
        /*
         * FORCE the GPMC CS4 pin (it is in use by the zoom system)
         */
        omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
}
EXPORT_SYMBOL(cy_as_hal_config_c_s_mux);

/*
 * inits all omap h/w
 */
uint32_t cy_as_hal_processor_hw_init(void)
{
        int i, err;

        cy_as_hal_print_message(KERN_INFO "init OMAP3430 hw...\n");

        iomux_vma = (u32)ioremap_nocache(
                                (u32)CTLPADCONF_BASE_ADDR, CTLPADCONF_SIZE);
        cy_as_hal_print_message(KERN_INFO "PADCONF_VMA=%x val=%x\n",
                                iomux_vma, IORD32(iomux_vma));

        /*
         * remap gpio banks
         */
        for (i = 0; i < 6; i++) {
                gpio_vma_tab[i].virt_addr = (u32)ioremap_nocache(
                                        gpio_vma_tab[i].phy_addr,
                                        gpio_vma_tab[i].size);

                cy_as_hal_print_message(KERN_INFO "%s virt_addr=%x\n",
                                        gpio_vma_tab[i].name,
                                        (u32)gpio_vma_tab[i].virt_addr);
        }

        /*
         * force OMAP_GPIO_126 to the released state,
         * it will be configured to drive reset
         */
        gpio_free(AST_RESET);

        /*
         * same thing with the AStoria CS pin
         */
        gpio_free(AST_CS);

        /*
         * initialize all the OMAP pads connected to astoria
         */
        cy_as_hal_init_user_pads(user_pad_cfg);

        err = cy_as_hal_gpmc_init();
        if (err < 0)
                cy_as_hal_print_message(KERN_INFO"gpmc init failed:%d", err);

        cy_as_hal_config_c_s_mux();

        return gpmc_data_vma;
}
EXPORT_SYMBOL(cy_as_hal_processor_hw_init);

void cy_as_hal_omap_hardware_deinit(cy_as_omap_dev_kernel *dev_p)
{
        /*
         * free omap hw resources
         */
        if (gpmc_data_vma != 0)
                iounmap((void *)gpmc_data_vma);

        if (csa_phy != 0)
                release_mem_region(csa_phy, BLKSZ_4K);

        gpmc_cs_free(AST_GPMC_CS);

        free_irq(OMAP_GPIO_IRQ(AST_INT), dev_p);

        cy_as_hal_release_user_pads(user_pad_cfg);
}

/*
 * These are the functions that are not part of the
 * HAL layer, but are required to be called for this HAL
 */

/*
 * Called on AstDevice LKM exit
 */
int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag)
{
        cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;

        /*
         * TODO: need to disable the WB interrupt handler first
         */
        if (0 == dev_p)
                return 1;

        cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
        if (dev_p->m_sig != CY_AS_OMAP_KERNEL_HAL_SIG) {
                cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
                                                                pgm, __func__);
                return 1;
        }

        /*
         * disable interrupt
         */
        cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
                        CY_AS_MEM_P0_INT_MASK_REG, 0x0000);

#if 0
        if (dev_p->thread_flag == 0) {
                dev_p->thread_flag = 1;
                wait_for_completion(&dev_p->thread_complete);
                cy_as_hal_print_message("cyasomaphal:"
                        "done cleaning thread\n");
                cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
        }
#endif

        cy_as_hal_omap_hardware_deinit(dev_p);

        /*
         * rearrange the list
         */
        if (m_omap_list_p == dev_p)
                m_omap_list_p = dev_p->m_next_p;

        cy_as_hal_free(dev_p);

        cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
        return 0;
}

int omap_start_intr(cy_as_hal_device_tag tag)
{
        cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
        int ret = 0;
        const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
                                CY_AS_MEM_P0_INTR_REG_MBINT;

        /*
         * register for interrupts
         */
        ret = cy_as_hal_configure_interrupts(dev_p);

        /*
         * enable only MBox & DRQ interrupts for now
         */
        cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
                                CY_AS_MEM_P0_INT_MASK_REG, mask);

        return 1;
}

/*
 * Below are the functions that communicate with the WestBridge device.
 * These are system dependent and must be defined by the HAL layer
 * for a given system.
 */

/*
 * GPMC NAND command+addr write phase
 */
static inline void nand_cmd_n_addr(u8 cmdb1, u16 col_addr, u32 row_addr)
{
        /*
         * byte order on the bus: <CMD> <CA0,CA1,RA0,RA1,RA2>
         */
        u32 tmpa32 = ((row_addr << 16) | col_addr);
        u8 RA2 = (u8)(row_addr >> 16);

        if (!pnand_16bit) {
                /*
                 * GPMC PNAND 8bit BUS
                 */
                /*
                 * CMD1
                 */
                IOWR8(ncmd_reg_vma, cmdb1);

                /*
                 * pnand bus: <CA0,CA1,RA0,RA1>
                 */
                IOWR32(naddr_reg_vma, tmpa32);

                /*
                 * <RA2>, always zero
                 */
                IOWR8(naddr_reg_vma, RA2);

        } else {
                /*
                 * GPMC PNAND 16bit BUS; in 16 bit mode CMD
                 * and ADDR are sent on [d7..d0]
                 */
                uint8_t CA0, CA1, RA0, RA1;
                CA0 = tmpa32 & 0x000000ff;
                CA1 = (tmpa32 >> 8) &  0x000000ff;
                RA0 = (tmpa32 >> 16) & 0x000000ff;
                RA1 = (tmpa32 >> 24) & 0x000000ff;

                /*
                 * can't use 32 bit writes here; omap will not serialize
                 * them to the lower half in 16 bit mode
                 */

                /*
                 * pnand bus: <CMD1, CA0,CA1,RA0,RA1, RA2 (always zero)>
                 */
                IOWR8(ncmd_reg_vma, cmdb1);
                IOWR8(naddr_reg_vma, CA0);
                IOWR8(naddr_reg_vma, CA1);
                IOWR8(naddr_reg_vma, RA0);
                IOWR8(naddr_reg_vma, RA1);
                IOWR8(naddr_reg_vma, RA2);
        }
}
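
/*
 * Worked example (illustrative values): for col_addr = 0x0000 and
 * row_addr = 0x0004, tmpa32 = 0x00040000, so the 8 bit bus sequence is
 * <CMD1> <CA0=0x00> <CA1=0x00> <RA0=0x04> <RA1=0x00> <RA2=0x00>,
 * since the GPMC serializes the 32 bit address write LSB first.
 */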

/*
 * spin until r/b goes high
 */
inline int wait_rn_b_high(void)
{
        u32 w_spins = 0;

        /*
         * TODO: note R/b may go low here, need to spin until high
         * while (omap_get_gpio_datain(AST_RnB) == 0) {
         * w_spins++;
         * }
         * if (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN)  == 0) {
         *
         * while (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN)  == 0) {
         * w_spins++;
         * }
         * printk("<1>RnB=0!:%d\n",w_spins);
         * }
         */
        return w_spins;
}

#ifdef ENABLE_GPMC_PF_ENGINE
/* #define PFE_READ_DEBUG
 * PNAND block read with the OMAP PFE enabled
 * status: not tested, not working, broken, etc.
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
        uint16_t w32cnt;
        uint32_t *ptr32;
        uint8_t *ptr8;
        uint8_t  bytes_in_fifo;

        /* debug vars */
#ifdef PFE_READ_DEBUG
        uint32_t loop_limit;
        uint16_t bytes_read = 0;
#endif

        /*
         * configure the prefetch engine
         */
        uint32_t tmp32;
        uint32_t pfe_status;

        /*
         * disable the prefetch engine first, in case
         * it is already running
         */
        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x0);
        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);

#ifdef PFE_READ_DEBUG
        tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG1));
        if (tmp32 != GPMC_PREFETCH_CONFIG1_VAL) {
                printk(KERN_INFO "<1> prefetch CONFIG1 read val:%8.8x, != VAL written:%8.8x\n",
                                tmp32, GPMC_PREFETCH_CONFIG1_VAL);
                tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
                printk(KERN_INFO "<1> GPMC_PREFETCH_STATUS : %8.8x\n", tmp32);
        }

        /*
         * sanity check 2
         */
        tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG2));
        if (tmp32 != (count))
                printk(KERN_INFO "<1> GPMC_PREFETCH_CONFIG2 read val:%d, "
                                "!= VAL written:%d\n", tmp32, count);
#endif

        /*
         * ISSUE PNAND CMD+ADDR, note gpmc puts 32b words
         * on the bus least sig. byte 1st
         */
        nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);

        IOWR8(ncmd_reg_vma, RDPAGE_B2);

        /*
         * start the prefetch engine
         */
        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);

        ptr32 = buff;

        while (1) {
                /*
                 * GPMC PFE service loop
                 */
                do {
                        /*
                         * spin until the PFE has fetched some
                         * PNAND bus words into the FIFO
                         */
                        pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
                        bytes_in_fifo = (pfe_status >> 24) & 0x7f;
                } while (bytes_in_fifo == 0);

                /* whole 32 bit words in fifo */
                w32cnt = bytes_in_fifo >> 2;

#if 0
                /*
                 * NOTE: FIFO_PTR indicates the number of NAND bus bytes
                 *   already received in the FIFO and available to be read
                 *   by DMA or MPU, whereas COUNTVAL indicates the number
                 *   of bus words yet to be read from the PNAND bus
                 */
                printk(KERN_ERR "<1> got PF_STATUS:%8.8x FIFO_PTR:%d, COUNTVAL:%d, w32cnt:%d\n",
                                        pfe_status, bytes_in_fifo,
                                        (pfe_status & 0x3fff), w32cnt);
#endif

                while (w32cnt--)
                        *ptr32++ = IORD32(gpmc_data_vma);

                if ((pfe_status & 0x3fff) == 0) {
                        /*
                         * PFE engine done; there may still be data left over
                         * in the FIFO. re-read the FIFO byte counter (check
                         * for leftovers from the 32 bit read accesses above)
                         */
                        bytes_in_fifo = (IORD32(
                                GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;

                        /*
                         * NOTE: we may still have one word left in the fifo,
                         * read it out
                         */
                        ptr8 = (uint8_t *)ptr32;
                        switch (bytes_in_fifo) {

                        case 0:
                                /*
                                 * nothing to do, we already read the
                                 * FIFO out with 32 bit accesses
                                 */
                                break;
                        case 1:
                                /*
                                 * this is only possible
                                 * for 8 bit pNAND
                                 */
                                *ptr8 = IORD8(gpmc_data_vma);
                                break;

                        case 2:
                                /*
                                 * this one can occur in either mode
                                 */
                                *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
                                break;

                        case 3:
                                /*
                                 * this is only possible for 8 bit pNAND
                                 */
                                *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
                                ptr8 += 2;
                                *ptr8 = IORD8(gpmc_data_vma);
                                break;

                        case 4:
                                /*
                                 * shouldn't happen, but has been seen
                                 * in 8 bit mode
                                 */
                                *ptr32 = IORD32(gpmc_data_vma);
                                break;

                        default:
                                printk(KERN_ERR"<1>_error: PFE FIFO bytes leftover is not read:%d\n",
                                                                bytes_in_fifo);
                                break;
                        }
                        /*
                         * the read is completed, get out of the while(1) loop
                         */
                        break;
                }
        }
}
#endif

#ifdef PFE_LBD_READ_V2
/*
 * PFE engine assisted reads with 64 byte blocks
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
        uint8_t rd_cnt;
        uint32_t *ptr32;
        uint8_t  *ptr8;
        uint16_t remainder;
        uint32_t pfe_status;

        /*
         * ISSUE PNAND CMD+ADDR
         * note gpmc puts 32b words on the bus least sig. byte 1st
         */
        nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
        IOWR8(ncmd_reg_vma, RDPAGE_B2);

        /*
         * setup PFE block
         * count - OMAP number of bytes to access on the pnand bus
         */

        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
        IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);

        ptr32 = buff;

        do {
                pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
                rd_cnt =  pfe_status >> (24+2);

                while (rd_cnt--)
                        *ptr32++ = IORD32(gpmc_data_vma);

        } while (pfe_status & 0x3fff);

        /*
         * read out the leftover
         */
        ptr8 = (uint8_t *)ptr32;
        rd_cnt = (IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS))  >> 24) & 0x7f;

        while (rd_cnt--)
                *ptr8++ = IORD8(gpmc_data_vma);
}
#endif

#ifdef PNAND_LBD_READ_NO_PFE
/*
 * endpoint buffer read w/o the OMAP GPMC prefetch engine;
 * the original working code, works at max speed for 8 bit xfers,
 * for 16 bit the bus diagram has gaps
 */
static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
        uint16_t w32cnt;
        uint32_t *ptr32;
        uint16_t *ptr16;
        uint16_t remainder;

        DBGPRN("<1> %s(): NO_PFE\n", __func__);

        ptr32 = buff;
        /* number of whole 32 bit words in the transfer */
        w32cnt = count >> 2;

        /* remainder, in bytes (0..3) */
        remainder =  count & 03;

        /*
         * note gpmc puts 32b words on the bus least sig. byte 1st
         */
        nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
        IOWR8(ncmd_reg_vma, RDPAGE_B2);

        /*
         * read data in 32 bit chunks
         */
        while (w32cnt--)
                *ptr32++ = IORD32(ndata_reg_vma);

        /*
         * now do the remainder (it can be 0, 1, 2 or 3);
         * same code for both 8 & 16 bit bus,
         * do 1 or 2 MORE words
         */
        ptr16 = (uint16_t *)ptr32;

        switch (remainder) {
        case 1:
                /* read one 16 bit word;
                 * IN 8 BIT MODE WE NEED TO READ an even number of bytes
                 */
        case 2:
                *ptr16 = IORD16(ndata_reg_vma);
                break;
        case 3:
                /*
                 * for 3 bytes read 2 16 bit words
                 */
                *ptr16++ = IORD16(ndata_reg_vma);
                *ptr16   = IORD16(ndata_reg_vma);
                break;
        default:
                /*
                 * remainder is 0
                 */
                break;
        }
}
#endif
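
/*
 * Worked example (illustration): count = 5 gives w32cnt = 1 (one
 * 32 bit read) and remainder = 1, handled by the case 1/2 fall-through
 * as a single 16 bit read; an even number of bus bytes is always read.
 */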

/*
 * uses LBD mode to write N bytes into astoria
 * status: working; however there is a 150 ns idle
 * time after every 2 (16 bit) or 4 (8 bit) bus cycles
 */
static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff)
{
        uint16_t w32cnt;
        uint16_t remainder;
        uint8_t  *ptr8;
        uint16_t *ptr16;
        uint32_t *ptr32;

        remainder =  count & 03;
        w32cnt = count >> 2;
        ptr32 = buff;
        ptr8 = buff;

        /*
         * send: CMDB1, CA0,CA1,RA0,RA1,RA2
         */
        nand_cmd_n_addr(PGMPAGE_B1, col_addr, row_addr);

        /*
         * blast the data out in 32 bit chunks
         */
        while (w32cnt--)
                IOWR32(ndata_reg_vma, *ptr32++);

        /*
         * do the remainder if there is one;
         * same handling for both 8 & 16 bit pnand modes
         */
        ptr16 = (uint16_t *)ptr32; /* do 1 or 2 words */

        switch (remainder) {
        case 1:
                /*
                 * write one 16 bit word
                 */
        case 2:
                IOWR16(ndata_reg_vma, *ptr16);
                break;

        case 3:
                /*
                 * for 3 bytes write 2 16 bit words
                 */
                IOWR16(ndata_reg_vma, *ptr16++);
                IOWR16(ndata_reg_vma, *ptr16);
                break;
        default:
                /*
                 * remainder is 0
                 */
                break;
        }
        /*
         * finally issue a PGM cmd
         */
        IOWR8(ncmd_reg_vma, PGMPAGE_B2);
}
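
/*
 * Usage sketch (mirrors the read path in
 * cy_service_e_p_dma_read_request() below): writing size bytes from
 * dptr into an astoria endpoint buffer:
 *
 *      p_nand_lbd_write(0x0000, CYAS_DEV_CALC_EP_ADDR(ep), size, dptr);
 *
 * the trailing PGMPAGE_B2 command inside the function commits the page.
 */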

/*
 * write an Astoria register
 */
static inline void ast_p_nand_casdi_write(u8 reg_addr8, u16 data)
{
        unsigned long flags;
        u16 addr16;
        /*
         * throw an error if called from multiple threads
         */
        static atomic_t wrreg_usage_cnt = { 0 };

        /*
         * disable interrupts
         */
        local_irq_save(flags);

        if (atomic_read(&wrreg_usage_cnt) != 0) {
                cy_as_hal_print_message(KERN_ERR "cy_as_omap_hal:"
                                "* cy_as_hal_write_register usage:%d\n",
                                atomic_read(&wrreg_usage_cnt));
        }

        atomic_inc(&wrreg_usage_cnt);

        /*
         * 2 flavors of GPMC -> PNAND access
         */
        if (pnand_16bit) {
                /*
                 *  16 BIT gpmc NAND mode
                 */

                /*
                 * CMD1, CA1, CA2,
                 */
                IOWR8(ncmd_reg_vma, 0x85);
                IOWR8(naddr_reg_vma, reg_addr8);
                IOWR8(naddr_reg_vma, 0x0c);

                /*
                 * this should be sent on the 16 bit bus
                 */
                IOWR16(ndata_reg_vma, data);
        } else {
                /*
                 * in 8 bit nand mode GPMC will automatically
                 * serialize 16 bit or 32 bit writes into
                 * 8 bit ones on the lower 8 bits, in LE order
                 */
                addr16 = 0x0c00 | reg_addr8;

                /*
                 * CMD1, CA1, CA2,
                 */
                IOWR8(ncmd_reg_vma, 0x85);
                IOWR16(naddr_reg_vma, addr16);
                IOWR16(ndata_reg_vma, data);
        }

        /*
         * re-enable interrupts
         */
        atomic_dec(&wrreg_usage_cnt);
        local_irq_restore(flags);
}
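
/*
 * Worked example (illustration): in 8 bit mode, writing 0x0000 to
 * astoria register 0x04 produces the bus sequence
 * <CMD 0x85> <0x04> <0x0c> <0x00> <0x00>, since addr16 = 0x0c04 and
 * the GPMC serializes the 16 bit address and data writes LSB first.
 */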


/*
 * read an astoria register via the pNAND interface
 */
static inline u16 ast_p_nand_casdo_read(u8 reg_addr8)
{
        u16 data;
        u16 addr16;
        unsigned long flags;
        /*
         * throw an error if called from multiple threads
         */
        static atomic_t rdreg_usage_cnt = { 0 };

        /*
         * disable interrupts
         */
        local_irq_save(flags);

        if (atomic_read(&rdreg_usage_cnt) != 0) {
                /*
                 * if it gets here (from other threads), this function
                 * needs spin_lock_irqsave() protection
                 */
                cy_as_hal_print_message(KERN_ERR"cy_as_omap_hal: "
                                "cy_as_hal_read_register usage:%d\n",
                                atomic_read(&rdreg_usage_cnt));
        }
        atomic_inc(&rdreg_usage_cnt);

        /*
         * 2 flavors of GPMC -> PNAND access
         */
        if (pnand_16bit) {
                /*
                 *  16 BIT gpmc NAND mode
                 *  CMD1, CA1, CA2,
                 */

                IOWR8(ncmd_reg_vma, 0x05);
                IOWR8(naddr_reg_vma, reg_addr8);
                IOWR8(naddr_reg_vma, 0x0c);
                IOWR8(ncmd_reg_vma, 0xE0);

                udelay(1);

                /*
                 * much faster through the GPMC register space
                 */
                data = IORD16(ndata_reg_vma);
        } else {
                /*
                 *  8 BIT gpmc NAND mode
                 *  CMD1, CA1, CA2, CMD2
                 */
                addr16 = 0x0c00 | reg_addr8;
                IOWR8(ncmd_reg_vma, 0x05);
                IOWR16(naddr_reg_vma, addr16);
                IOWR8(ncmd_reg_vma, 0xE0);
                udelay(1);
                data = IORD16(ndata_reg_vma);
        }

        /*
         * re-enable interrupts
         */
        atomic_dec(&rdreg_usage_cnt);
        local_irq_restore(flags);

        return data;
}


/*
 * This function must be defined to write a register within the WestBridge
 * device.  The addr value is the address of the register to write with
 * respect to the base address of the WestBridge device.
 */
void cy_as_hal_write_register(
                                        cy_as_hal_device_tag tag,
                                        uint16_t addr, uint16_t data)
{
        ast_p_nand_casdi_write((u8)addr, data);
}

/*
 * This function must be defined to read a register from the WestBridge
 * device.  The addr value is the address of the register to read with
 * respect to the base address of the WestBridge device.
 */
uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
{
        uint16_t data  = 0;

        /*
         * READ ASTORIA REGISTER USING CASDO
         */
        data = ast_p_nand_casdo_read((u8)addr);

        return data;
}
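
/*
 * Usage sketch (illustration): a read-modify-write of an astoria
 * register through this pNAND register window:
 *
 *      uint16_t v = cy_as_hal_read_register(tag,
 *                      CY_AS_MEM_P0_INT_MASK_REG);
 *      cy_as_hal_write_register(tag, CY_AS_MEM_P0_INT_MASK_REG,
 *                      v | CY_AS_MEM_P0_INTR_REG_MBINT);
 */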
1490
1491 /*
1492  * preps Ep pointers & data counters for next packet
1493  * (fragment of the request) xfer returns true if
1494  * there is a next transfer, and false if all bytes in
1495  * current request have been xfered
1496  */
1497 static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
1498 {
1499
1500         if (!end_points[ep].sg_list_enabled) {
1501                 /*
1502                  * no further transfers for non storage EPs
1503                  * (like EP2 during firmware download, done
1504                  * in 64 byte chunks)
1505                  */
1506                 if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
1507                         DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
1508                                 __func__, end_points[ep].req_length, ep);
1509
1510                         /*
1511                          * no more transfers, we are done with the request
1512                          */
1513                         return false;
1514                 }
1515
1516                 /*
1517                  * calculate size of the next DMA xfer, corner
1518                  * case for non-storage EPs where transfer size
1519                  * is not egual N * HAL_DMA_PKT_SZ xfers
1520                  */
1521                 if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
1522                 >= HAL_DMA_PKT_SZ) {
1523                                 end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
1524                 } else {
1525                         /*
1526                          * that would be the last chunk less
1527                          * than P-port max size
1528                          */
1529                         end_points[ep].dma_xfer_sz = end_points[ep].req_length -
1530                                         end_points[ep].req_xfer_cnt;
1531                 }
1532
1533                 return true;
1534         }
1535
1536         /*
1537          * for SG_list assisted dma xfers
1538          * are we done with current SG ?
1539          */
1540         if (end_points[ep].seg_xfer_cnt ==  end_points[ep].sg_p->length) {
1541                 /*
1542                  *  was it the Last SG segment on the list ?
1543                  */
1544                 if (sg_is_last(end_points[ep].sg_p)) {
1545                         DBGPRN("<1> %s: EP:%d completed,"
1546                                         "%d bytes xfered\n",
1547                                         __func__,
1548                                         ep,
1549                                         end_points[ep].req_xfer_cnt
1550                         );
1551
1552                         return false;
1553                 } else {
1554                         /*
1555                          * There are more SG segments in current
1556                          * request's sg list setup new segment
1557                          */
1558
1559                         end_points[ep].seg_xfer_cnt = 0;
1560                         end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
1561                         /* set data pointer for next DMA sg transfer*/
1562                         end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1563                         DBGPRN("<1> %s new SG:_va:%p\n\n",
1564                                         __func__, end_points[ep].data_p);
1565                 }
1566
1567         }
1568
1569         /*
1570          * for sg list xfers it will always be 512 or 1024
1571          */
1572         end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
1573
1574         /*
1575          * next transfer is required
1576          */
1577
1578         return true;
1579 }
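
/*
 * The chunking rule above reduces to "transfer min(remaining, packet
 * size) bytes".  A minimal stand-alone sketch of that calculation
 * (hypothetical helper name; kept out of the build, illustration only):
 */
#if 0
static uint32_t next_chunk_size(uint32_t req_length, uint32_t req_xfer_cnt)
{
        uint32_t remaining = req_length - req_xfer_cnt;

        /* a full P-port packet while enough data remains... */
        if (remaining >= HAL_DMA_PKT_SZ)
                return HAL_DMA_PKT_SZ;

        /* ...and one short tail chunk for non-storage EPs */
        return remaining;
}
#endif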
1580
1581 /*
1582  * Astoria DMA read request, APP_CPU reads from WB ep buffer
1583  */
1584 static void cy_service_e_p_dma_read_request(
1585                         cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1586 {
1587         cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1588         uint16_t  v, size;
1589         void    *dptr;
1590         uint16_t col_addr = 0x0000;
1591         uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1592         uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1593
1594         /*
1595          * get the xfer size from the WB EP DMA register
1596          */
1597         v = cy_as_hal_read_register(tag, ep_dma_reg);
1598
1599         /*
1600          * amount of data in the EP buffer, in bytes
1601          */
1602         size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;
1603
1604         /*
1605          * memory pointer for this DMA packet xfer (sub_segment)
1606          */
1607         dptr = end_points[ep].data_p;
1608
1609         DBGPRN("<1>HAL:_svc_dma_read on EP_%d sz:%d, intr_seq:%d, dptr:%p\n",
1610                 ep,
1611                 size,
1612                 intr_sequence_num,
1613                 dptr
1614         );
1615
1616         cy_as_hal_assert(size != 0);
1617
1618         if (size) {
1619                 /*
1620                  * the actual WB-->OMAP memory "soft" DMA xfer
1621                  */
1622                 p_nand_lbd_read(col_addr, row_addr, size, dptr);
1623         }
1624
1625         /*
1626          * clear DMAVALID bit indicating that the data has been read
1627          */
1628         cy_as_hal_write_register(tag, ep_dma_reg, 0);
1629
1630         end_points[ep].seg_xfer_cnt += size;
1631         end_points[ep].req_xfer_cnt += size;
1632
1633         /*
1634          * pre-advance the data pointer (if it ends up outside
1635          * the sg list it will be reset anyway)
1636          */
1637         end_points[ep].data_p += size;
1638
1639         if (prep_for_next_xfer(tag, ep)) {
1640                 /*
1641                  * we have more data to read in this request;
1642                  * set up the next dma packet and tell WB how much
1643                  * data we are going to xfer next
1644                  */
1645                 v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
1646                                 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1647                 cy_as_hal_write_register(tag, ep_dma_reg, v);
1648         } else {
1649                 end_points[ep].pending = cy_false;
1650                 end_points[ep].type = cy_as_hal_none;
1651                 end_points[ep].buffer_valid = cy_false;
1652
1653                 /*
1654                  * notify the API that we are done with rq on this EP
1655                  */
1656                 if (callback) {
1657                         DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
1658                                 end_points[ep].req_xfer_cnt);
1659                         callback(tag, ep,
1660                                 end_points[ep].req_xfer_cnt,
1661                                 CY_AS_ERROR_SUCCESS);
1662                 }
1663         }
1664 }
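
/*
 * The EP DMA register handshake used above, gathered in one place: the
 * count field reports how many bytes sit in the EP buffer, writing 0
 * acks the packet, and writing size|DMAVAL re-arms the endpoint.  A
 * hedged sketch of the sequence (illustration only, not built):
 */
#if 0
static void ep_dma_read_handshake(cy_as_hal_device_tag tag, uint8_t ep,
                                        uint16_t next_sz)
{
        uint16_t reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
        uint16_t cnt;

        /* 1. how many bytes does astoria have for us? */
        cnt = cy_as_hal_read_register(tag, reg) &
                        CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;

        /* 2. p_nand_lbd_read() of cnt bytes would happen here */

        /* 3. ack: clear DMAVALID so astoria can refill the buffer */
        cy_as_hal_write_register(tag, reg, 0);

        /* 4. re-arm for the next packet of next_sz bytes */
        cy_as_hal_write_register(tag, reg,
                        next_sz | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL);
}
#endif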
1665
1666 /*
1667  * omap_cpu needs to transfer data to ASTORIA EP buffer
1668  */
1669 static void cy_service_e_p_dma_write_request(
1670                         cy_as_omap_dev_kernel *dev_p, uint8_t ep)
1671 {
1672         uint16_t  addr;
1673         uint16_t v  = 0;
1674         uint32_t  size;
1675         uint16_t col_addr = 0x0000;
1676         uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
1677         void    *dptr;
1678
1679         cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
1680         /*
1681          * note: "size" here is the size of the dma transfer; it can be
1682          * anything > 0 and up to the P_PORT packet size
1683          */
1684         size = end_points[ep].dma_xfer_sz;
1685         dptr = end_points[ep].data_p;
1686
1687         /*
1688          * perform the "soft" DMA transfer: the CPU copies the
1689          * data itself rather than a h/w DMA controller
1690          */
1690         if (size)
1691                 p_nand_lbd_write(col_addr, row_addr, size, dptr);
1692
1693         end_points[ep].seg_xfer_cnt += size;
1694         end_points[ep].req_xfer_cnt += size;
1695         /*
1696          * pre-advance data pointer
1697          * (if it's outside sg list it will be reset anyway)
1698          */
1699         end_points[ep].data_p += size;
1700
1701         /*
1702          * now clear DMAVAL bit to indicate we are done
1703          * transferring data and that the data can now be
1704          * sent via USB to the USB host, sent to storage,
1705          * or used internally.
1706          */
1707
1708         addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1709         cy_as_hal_write_register(tag, addr, size);
1710
1711         /*
1712          * finally, tell the USB subsystem that the
1713          * data is gone and we can accept the
1714          * next request if one exists.
1715          */
1716         if (prep_for_next_xfer(tag, ep)) {
1717                 /*
1718                  * There is more data to go. Re-init the WestBridge DMA side
1719                  */
1720                 v = end_points[ep].dma_xfer_sz |
1721                         CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1722                 cy_as_hal_write_register(tag, addr, v);
1723         } else {
1724
1725                 end_points[ep].pending = cy_false;
1726                 end_points[ep].type = cy_as_hal_none;
1727                 end_points[ep].buffer_valid = cy_false;
1728
1729                 /*
1730                  * notify the API that we are done with rq on this EP
1731                  */
1732                 if (callback) {
1733                         /*
1734                          * this callback wakes up the process that may be
1735                          * sleeping on the EP whose data is being transferred
1736                          */
1737                         callback(tag, ep,
1738                                         end_points[ep].req_xfer_cnt,
1739                                         CY_AS_ERROR_SUCCESS);
1740                 }
1741         }
1742 }
1743
1744 /*
1745  * handle DRQ interrupt from Astoria (called in AS_Intr context)
1746  */
1747 static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p)
1748 {
1749         uint16_t v;
1750         static uint8_t service_ep = 2;
1751
1752         /* we've got a DRQ INT, read the DRQ STATUS register */
1754         v = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
1755                         CY_AS_MEM_P0_DRQ);
1756
1757         if (v == 0) {
1758 #ifndef WESTBRIDGE_NDEBUG
1759                 cy_as_hal_print_message("stray DRQ interrupt detected\n");
1760 #endif
1761                 return;
1762         }
1763
1764         /*
1765          * Now, pick a given DMA request to handle; for now we just
1766          * go round robin.  Each bit position in the DRQ status value
1767          * represents an endpoint from EP2 to EP15.  We rotate through
1768          * the endpoints to find one that needs to be serviced.
1769          */
1770         while ((v & (1 << service_ep)) == 0) {
1771
1772                 if (service_ep == 15)
1773                         service_ep = 2;
1774                 else
1775                         service_ep++;
1776         }
1777
1778         if (end_points[service_ep].type == cy_as_hal_write) {
1779                 /*
1780                  * handle DMA WRITE REQUEST: app_cpu will
1781                  * write data into astoria EP buffer
1782                  */
1783                 cy_service_e_p_dma_write_request(dev_p, service_ep);
1784         } else if (end_points[service_ep].type == cy_as_hal_read) {
1785                 /*
1786                  * handle DMA READ REQUEST: cpu will
1787                  * read EP buffer from Astoria
1788                  */
1789                 cy_service_e_p_dma_read_request(dev_p, service_ep);
1790         }
1791 #ifndef WESTBRIDGE_NDEBUG
1792         else
1793                 cy_as_hal_print_message("cyashalomap: interrupt "
1794                                         "w/o pending DMA job, "
1795                                         "check DRQ_MASK logic\n");
1796 #endif
1797
1798         /*
1799          * Now bump the EP ahead, so other endpoints get
1800          * a shot before the one we just serviced
1801          */
1802         if (end_points[service_ep].type == cy_as_hal_none) {
1803                 if (service_ep == 15)
1804                         service_ep = 2;
1805                 else
1806                         service_ep++;
1807         }
1808
1809 }
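
/*
 * An equivalent, more compact way to express the scan above, "next set
 * bit at or after service_ep, wrapping within EP2..EP15", is sketched
 * below (hypothetical helper, not built; same assumption that the DRQ
 * status has at least one of bits 2..15 set):
 */
#if 0
static uint8_t next_ep_to_service(uint16_t drq_status, uint8_t service_ep)
{
        uint8_t ep = service_ep;

        while ((drq_status & (1 << ep)) == 0)
                ep = (ep == 15) ? 2 : ep + 1;

        return ep;
}
#endif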
1810
1811 void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
1812 {
1813         DBGPRN("cy_as_hal_dma_cancel_request on ep:%d\n", ep);
1814         if (end_points[ep].pending)
1815                 cy_as_hal_write_register(tag,
1816                                 CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);
1817
1818         end_points[ep].buffer_valid = cy_false;
1819         end_points[ep].type = cy_as_hal_none;
1820 }
1821
1822 /*
1823  * enables/disables SG-list assisted DMA xfers for the given EP.
1824  * sg_list assisted xfers can use the physical addresses of memory pages
1825  * in case the xfer is performed by a h/w DMA controller rather than
1826  * by the CPU on the P port
1827  */
1827 void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
1828 {
1829         end_points[ep].sg_list_enabled = sg_xfer_enabled;
1830         DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
1831                         ep, end_points[ep].sg_list_enabled);
1832 }
1833 EXPORT_SYMBOL(cy_as_hal_set_ep_dma_mode);
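
/*
 * Typical use from an upper layer: a block driver that hands scatterlists
 * to the HAL would flip its storage endpoints into sg mode once at init
 * time.  A hedged usage sketch (not built; the EP numbers 4 and 8 match
 * the defaults set in start_o_m_a_p_kernel() below):
 */
#if 0
static void example_blkdev_dma_mode_init(void)
{
        /* storage read/write EPs will receive struct scatterlist * */
        cy_as_hal_set_ep_dma_mode(4, true);
        cy_as_hal_set_ep_dma_mode(8, true);

        /* leave e.g. EP2 (firmware download) in plain buffer mode */
        cy_as_hal_set_ep_dma_mode(2, false);
}
#endif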
1834
1835 /*
1836  * This function must be defined to transfer a block of data to
1837  * the WestBridge device.  This function can use the burst write
1838  * (DMA) capabilities of WestBridge to do this, or it can just copy
1839  * the data using writes.
1840  */
1841 void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
1842                                                 uint8_t ep, void *buf,
1843                                                 uint32_t size, uint16_t maxsize)
1844 {
1845         uint32_t addr = 0;
1846         uint16_t v  = 0;
1847
1848         /*
1849          * Note: "size" is the actual request size
1850          * "maxsize" - is the P port fragment size
1851          * No EP0 or EP1 traffic should get here
1852          */
1853         cy_as_hal_assert(ep != 0 && ep != 1);
1854
1855         /*
1856          * If this asserts, we have an ordering problem.  Another DMA request
1857          * is coming down before the previous one has completed.
1858          */
1859         cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1860         end_points[ep].buffer_valid = cy_true;
1861         end_points[ep].type = cy_as_hal_write;
1862         end_points[ep].pending = cy_true;
1863
1864         /*
1865          * total length of the request
1866          */
1867         end_points[ep].req_length = size;
1868
1869         if (size >= maxsize) {
1870                 /*
1871                  * set the xfer size for the very 1st DMA operation to
1872                  * the port max packet size (typically 512 or 1024)
1873                  */
1874                 end_points[ep].dma_xfer_sz = maxsize;
1875         } else {
1876                 /*
1877                  * smaller xfers for non-storage EPs
1878                  */
1879                 end_points[ep].dma_xfer_sz = size;
1880         }
1881
1882         /*
1883          * check whether the EP transfer mode uses an sg_list rather than
1884          * a memory buffer; block devices pass one to the HAL so that the
1885          * HAL can get the real physical address of each segment and set
1886          * up h/w DMA controller transfers (if there is such a controller)
1887          */
1888         if (end_points[ep].sg_list_enabled) {
1889                 /*
1890                  * buf -  pointer to the SG list
1891                  * data_p - data pointer to the 1st DMA segment
1892                  * seg_xfer_cnt - keeps track of N of bytes sent in current
1893                  *              sg_list segment
1894                  * req_xfer_cnt - keeps track of the total N of bytes
1895                  *              transferred for the request
1896                  */
1897                 end_points[ep].sg_p = buf;
1898                 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
1899                 end_points[ep].seg_xfer_cnt = 0;
1900                 end_points[ep].req_xfer_cnt = 0;
1901
1902 #ifdef DBGPRN_DMA_SETUP_WR
1903                 DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p, "
1904                                 "req_sz:%d, maxsz:%d\n",
1905                                 __func__,
1906                                 ep,
1907                                 buf,
1908                                 end_points[ep].data_p,
1909                                 size,
1910                                 maxsize);
1911 #endif
1912
1913         } else {
1914                 /*
1915                  * setup XFER for non sg_list assisted EPs
1916                  */
1917
1918                 #ifdef DBGPRN_DMA_SETUP_WR
1919                         DBGPRN("<1>%s non storage or sz < 512: "
1920                                         "EP:%d, sz:%d\n", __func__, ep, size);
1921                 #endif
1922
1923                 end_points[ep].sg_p = NULL;
1924
1925                 /*
1926                  * must be a VMA of a membuf in kernel space
1927                  */
1928                 end_points[ep].data_p = buf;
1929
1930                 /*
1931                  * keeps track of the number of bytes xferred for the request
1932                  */
1933                 end_points[ep].req_xfer_cnt = 0;
1934         }
1935
1936         /*
1937          * Tell WB we are ready to send data on the given endpoint
1938          */
1939         v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
1940                         | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
1941
1942         addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1943
1944         cy_as_hal_write_register(tag, addr, v);
1945 }
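
/*
 * Note the double meaning of "buf" above: with sg_list_enabled it carries
 * a struct scatterlist *, otherwise a plain kernel virtual address.  A
 * hedged sketch of the two call shapes (invented example values, not
 * built):
 */
#if 0
static void example_setup_write_calls(cy_as_hal_device_tag tag,
                                struct scatterlist *sg, void *kbuf)
{
        /* sg-assisted storage EP: buf carries the scatterlist */
        cy_as_hal_dma_setup_write(tag, 4, sg, 4096, 512);

        /* plain EP, e.g. firmware download on EP2: buf is a kernel VA */
        cy_as_hal_dma_setup_write(tag, 2, kbuf, 64, 64);
}
#endif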
1946
1947 /*
1948  * This function must be defined to transfer a block of data from
1949  * the WestBridge device.  This function can use the burst read
1950  * (DMA) capabilities of WestBridge to do this, or it can just
1951  * copy the data using reads.
1952  */
1953 void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
1954                                         uint8_t ep, void *buf,
1955                                         uint32_t size, uint16_t maxsize)
1956 {
1957         uint32_t addr;
1958         uint16_t v;
1959
1960         /*
1961          * Note: "size" is the actual request size
1962          * "maxsize" - is the P port fragment size
1963          * No EP0 or EP1 traffic should get here
1964          */
1965         cy_as_hal_assert(ep != 0 && ep != 1);
1966
1967         /*
1968          * If this asserts, we have an ordering problem.
1969          * Another DMA request is coming down before the
1970          * previous one has completed.  We should not get
1971          * new requests while the current one is still in progress.
1972          */
1973
1974         cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
1975
1976         end_points[ep].buffer_valid = cy_true;
1977         end_points[ep].type = cy_as_hal_read;
1978         end_points[ep].pending = cy_true;
1979         end_points[ep].req_xfer_cnt = 0;
1980         end_points[ep].req_length = size;
1981
1982         if (size >= maxsize) {
1983                 /*
1984                  * set the xfer size for the very 1st DMA operation to
1985                  * the port max packet size (typically 512 or 1024)
1986                  */
1987                 end_points[ep].dma_xfer_sz = maxsize;
1988         } else {
1989                 /*
1990                  * so that we can handle small xfers in case
1991                  * of non-storage EPs
1992                  */
1993                 end_points[ep].dma_xfer_sz = size;
1994         }
1995
1996         addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
1997
1998         if (end_points[ep].sg_list_enabled) {
1999                 /*
2000                  * Handle sg-list assisted EPs
2001                  * seg_xfer_cnt - keeps track of N of bytes sent in the
2002                  *              current sg_list segment
2003                  * buf - pointer to the SG list
2004                  * data_p - data pointer for the 1st DMA segment
2005                  */
2005                 end_points[ep].seg_xfer_cnt = 0;
2006                 end_points[ep].sg_p = buf;
2007                 end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
2008
2009                 #ifdef DBGPRN_DMA_SETUP_RD
2010                 DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
2011                            "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
2012                                 ep,
2013                                 buf,
2014                                 end_points[ep].data_p,
2015                                 size,
2016                                 maxsize);
2017                 #endif
2018                 v = (end_points[ep].dma_xfer_sz &
2019                                 CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2020                                 CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2021                 cy_as_hal_write_register(tag, addr, v);
2022         } else {
2023                 /*
2024                  * non sg-list EPs pass a void *buf rather than
2025                  * a scatterlist *sg
2026                  */
2026                 #ifdef DBGPRN_DMA_SETUP_RD
2027                         DBGPRN("%s:non-sg_list EP:%d,"
2028                                         "RQ_sz:%d, maxsz:%d\n",
2029                                         __func__, ep, size,  maxsize);
2030                 #endif
2031
2032                 end_points[ep].sg_p = NULL;
2033
2034                 /*
2035                  * must be a VMA of a membuf in kernel space
2036                  */
2037                 end_points[ep].data_p = buf;
2038
2039                 /*
2040                  * Program the EP DMA register for Storage endpoints only.
2041                  */
2042                 if (is_storage_e_p(ep)) {
2043                         v = (end_points[ep].dma_xfer_sz &
2044                                         CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
2045                                         CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
2046                         cy_as_hal_write_register(tag, addr, v);
2047                 }
2048         }
2049 }
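
/*
 * End-to-end flow of a storage read, tying the pieces above together
 * (hedged walk-through sketch, not built):
 */
#if 0
static void example_read_flow(cy_as_hal_device_tag tag,
                                struct scatterlist *sg)
{
        /* 1. arm EP4: writes size|DMAVAL into the EP DMA register */
        cy_as_hal_dma_setup_read(tag, 4, sg, 4096, 512);

        /*
         * 2. astoria raises DRQ when a packet is ready;
         *    cy_handle_d_r_q_interrupt() dispatches to
         *    cy_service_e_p_dma_read_request(), which copies each packet
         *    and re-arms until prep_for_next_xfer() reports completion
         *
         * 3. the registered dma_complete callback finally reports
         *    req_xfer_cnt bytes with CY_AS_ERROR_SUCCESS
         */
}
#endif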
2050
2051 /*
2052  * This function must be defined to allow the WB API to
2053  * register a callback function that is called when a
2054  * DMA transfer is complete.
2055  */
2056 void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
2057                                         cy_as_hal_dma_complete_callback cb)
2058 {
2059         DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%p\n",
2060                         __func__, cb);
2061         callback = cb;
2062 }
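
/*
 * A hedged example of a matching callback; the parameter types below are
 * inferred from how "callback" is invoked in the service routines above
 * (not built):
 */
#if 0
static void example_dma_done(cy_as_hal_device_tag tag, uint8_t ep,
                                uint32_t xfer_cnt, uint32_t status)
{
        /* runs in AS interrupt context once a whole request is done */
        if (status == CY_AS_ERROR_SUCCESS)
                DBGPRN("<1> EP:%d request complete, %d bytes\n",
                                ep, xfer_cnt);
}

/* registration, e.g. from the WB API init path:
 * cy_as_hal_dma_register_callback(tag, example_dma_done);
 */
#endif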
2063
2064 /*
2065  * This function must be defined to return the maximum size of
2066  * DMA request that can be handled on the given endpoint.  The
2067  * return value should be the maximum size in bytes that the DMA
2068  * module can handle.
2069  */
2070 uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
2071                                         cy_as_end_point_number_t ep)
2072 {
2073         /*
2074          * Storage reads and writes are always done in 512 byte blocks.
2075          * So, we do the count handling within the HAL, and save on
2076          * some of the data transfer delay.
2077          */
2078         if ((ep == CYASSTORAGE_READ_EP_NUM) ||
2079         (ep == CYASSTORAGE_WRITE_EP_NUM)) {
2080                 /* max DMA request size HAL can handle by itself */
2081                 return CYASSTORAGE_MAX_XFER_SIZE;
2082         } else {
2083                 /*
2084                  * For the USB - Processor endpoints, the maximum transfer
2085                  * size depends on the speed of USB operation. So, we use
2086                  * the following constant to indicate to the API that
2087                  * splitting of the data into chunks less than or equal to
2088                  * the max transfer size should be handled internally.
2089                  */
2090
2091                 /* DEFINED AS 0xffffffff in cyasdma.h */
2092                 return CY_AS_DMA_MAX_SIZE_HW_SIZE;
2093         }
2094 }
2095
2096 /*
2097  * This function must be defined to set the state of the WAKEUP pin
2098  * on the WestBridge device.  Generally this is done via a GPIO of
2099  * some type.
2100  */
2101 cy_bool cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state)
2102 {
2103         /*
2104          * Not supported as of now.
2105          */
2106         return cy_false;
2107 }
2108
2109 void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
2110 {
2111         cy_as_hal_print_message("error: astoria PLL lock is lost\n");
2112         cy_as_hal_print_message("please check the input voltage levels "
2113                                 "and clock, and restart the system\n");
2114 }
2115
2116 /*
2117  * Below are the functions that must be defined to provide the basic
2118  * operating system services required by the API.
2119  */
2120
2121 /*
2122  * This function is required by the API to allocate memory.
2123  * This function is expected to work exactly like malloc().
2124  */
2125 void *cy_as_hal_alloc(uint32_t cnt)
2126 {
2127         return kmalloc(cnt, GFP_ATOMIC);
2128 }
2129
2130 /*
2131  * This function is required by the API to free memory allocated
2132  * with cy_as_hal_alloc().  This function is expected to work exactly
2133  * like free().
2134  */
2135 void cy_as_hal_free(void *mem_p)
2136 {
2137         kfree(mem_p);
2138 }
2139
2140 /*
2141  * Allocator that can be used in interrupt context.
2142  * We have to ensure that the kmalloc call does not
2143  * sleep in this case.
2144  */
2145 void *cy_as_hal_c_b_alloc(uint32_t cnt)
2146 {
2147         return kmalloc(cnt, GFP_ATOMIC);
2148 }
2149
2150 /*
2151  * This function is required to set a block of memory to a
2152  * specific value.  This function is expected to work exactly
2153  * like memset()
2154  */
2155 void cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt)
2156 {
2157         memset(ptr, value, cnt);
2158 }
2159
2160 /*
2161  * This function is expected to create a sleep channel.
2162  * The data structure that represents the sleep channel
2163  * (a Linux "wait_queue_head_t wq" for this particular HAL)
2164  * is passed in as a pointer and allocated by the caller
2165  * (typically as a local var on the stack).  "Create" should read
2166  * as "SleepOn"; this func doesn't actually create anything.
2167  */
2168 cy_bool cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel)
2169 {
2170         init_waitqueue_head(&channel->wq);
2171         return cy_true;
2172 }
2173
2174 /*
2175  * for this particular HAL it doesn't actually destroy anything,
2176  * since no actual sleep object is created in create_sleep_channel();
2177  * the sleep channel is given by the pointer in the argument.
2178  */
2179 cy_bool cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel)
2180 {
2181         return cy_true;
2182 }
2183
2184 /*
2185  * platform specific wakeable Sleep implementation
2186  */
2187 cy_bool cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms)
2188 {
2189         wait_event_interruptible_timeout(channel->wq, 0, ((ms * HZ)/1000));
2190         return cy_true;
2191 }
2192
2193 /*
2194  * wakes up the process waiting on the CHANNEL
2195  */
2196 cy_bool cy_as_hal_wake(cy_as_hal_sleep_channel *channel)
2197 {
2198         wake_up_interruptible_all(&channel->wq);
2199         return cy_true;
2200 }
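
/*
 * Typical producer/consumer use of the three sleep-channel primitives
 * above (hedged sketch, not built; the "done" flag and both functions
 * are invented for illustration):
 */
#if 0
static cy_as_hal_sleep_channel example_ch;
static volatile int example_done;

static void example_waiter(void)
{
        cy_as_hal_create_sleep_channel(&example_ch);

        /* sleep_on() returns after at most 10 ms here, so poll the flag */
        while (!example_done)
                cy_as_hal_sleep_on(&example_ch, 10);

        cy_as_hal_destroy_sleep_channel(&example_ch);
}

static void example_completer(void)
{
        /* e.g. from a DMA completion callback */
        example_done = 1;
        cy_as_hal_wake(&example_ch);
}
#endif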
2201
2202 uint32_t cy_as_hal_disable_interrupts(void)
2203 {
2204         /* nesting counter only; nothing is actually masked here */
2207         intr__enable++;
2208         return 0;
2209 }
2210
2211 void cy_as_hal_enable_interrupts(uint32_t val)
2212 {
2213         intr__enable--;
2216 }
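
/*
 * If real masking were ever needed, one hedged way to implement this
 * pair on this platform would be to take the existing int_lock with
 * irqsave semantics at the outermost nesting level (sketch only, not
 * built; the flags variable is invented here):
 */
#if 0
static unsigned long wb_irq_flags;

uint32_t cy_as_hal_disable_interrupts(void)
{
        if (0 == intr__enable)
                spin_lock_irqsave(&int_lock, wb_irq_flags);

        intr__enable++;
        return 0;
}

void cy_as_hal_enable_interrupts(uint32_t val)
{
        intr__enable--;
        if (0 == intr__enable)
                spin_unlock_irqrestore(&int_lock, wb_irq_flags);
}
#endif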
2217
2218 /*
2219  * spin for at least 150 ns; the busy loop below is cpu dependent
2220  */
2221 void cy_as_hal_sleep150(void)
2222 {
2223         uint32_t i, j;
2224
2225         j = 0;
2226         for (i = 0; i < 1000; i++)
2227                 j += (~i);
2228 }
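
/*
 * Where a calibrated delay is preferred over the open-coded loop above,
 * the same contract can be met with ndelay() from <linux/delay.h>, which
 * this file already includes (hedged alternative, not built):
 */
#if 0
void cy_as_hal_sleep150(void)
{
        ndelay(150);    /* busy-wait for at least 150 ns */
}
#endif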
2229
2230 void cy_as_hal_sleep(uint32_t ms)
2231 {
2232         cy_as_hal_sleep_channel channel;
2233
2234         cy_as_hal_create_sleep_channel(&channel);
2235         cy_as_hal_sleep_on(&channel, ms);
2236         cy_as_hal_destroy_sleep_channel(&channel);
2237 }
2238
2239 cy_bool cy_as_hal_is_polling(void)
2240 {
2241         return cy_false;
2242 }
2243
2244 void cy_as_hal_c_b_free(void *ptr)
2245 {
2246         cy_as_hal_free(ptr);
2247 }
2248
2249 /*
2250  * supposed to reinstate the astoria registers
2251  * that may be clobbered in sleep mode
2252  */
2253 void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
2254                                         cy_bool is_standby_wakeup)
2255 {
2256         /* specific to SPI, no implementation required */
2257         (void) tag;
2258         (void) is_standby_wakeup;
2259 }
2260
2261 void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag)
2262 {
2263         /* specific to SPI, no implementation required */
2264         (void) tag;
2265 }
2266
2267 cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag)
2268 {
2269         /*
2270          * we are in asynchronous mode, so no need to handle this
2271          */
2272         return true;
2273 }
2274
2275 /*
2276  * init OMAP h/w resources
2277  */
2278 int start_o_m_a_p_kernel(const char *pgm,
2279                                 cy_as_hal_device_tag *tag, cy_bool debug)
2280 {
2281         cy_as_omap_dev_kernel *dev_p;
2282         int i;
2283         u16 data16[4];
2284         u8 pncfg_reg;
2285
2286         /*
2287          * No debug mode support through argument as of now
2288          */
2289         (void)debug;
2290
2291         DBGPRN(KERN_INFO"starting OMAP34xx HAL...\n");
2292
2293         /*
2294          * Initialize the HAL level endpoint DMA data.
2295          */
2296         for (i = 0; i < ARRAY_SIZE(end_points); i++) {
2297                 end_points[i].data_p = 0;
2298                 end_points[i].pending = cy_false;
2299                 end_points[i].size = 0;
2300                 end_points[i].type = cy_as_hal_none;
2301                 end_points[i].sg_list_enabled = cy_false;
2302
2303                 /*
2304                  * by default the DMA transfers to/from the E_ps don't
2305                  * use an sg_list, which implies that upper devices like
2306                  * the blockdevice have to enable it for the E_ps in
2307                  * their initialization code
2308                  */
2309         }
2310
2311         /*
2312          * allocate memory for OMAP HAL
2313          */
2314         dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
2315                                                 sizeof(cy_as_omap_dev_kernel));
2316         if (dev_p == 0) {
2317                 cy_as_hal_print_message("out of memory allocating OMAP "
2318                                         "device structure\n");
2319                 return 0;
2320         }
2321
2322         dev_p->m_sig = CY_AS_OMAP_KERNEL_HAL_SIG;
2323
2324         /*
2325          * initialize OMAP hardware and all gpio pins
2326          */
2327         dev_p->m_addr_base = (void *)cy_as_hal_processor_hw_init();
2328
2329         /*
2330          * Now perform a hard reset of the device to have
2331          * the new settings take effect
2332          */
2333         __gpio_set_value(AST_WAKEUP, 1);
2334
2335         /*
2336          * do Astoria  h/w reset
2337          */
2338         DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");
2339
2340         /*
2341          * NEGATIVE PULSE on RST pin
2342          */
2343         __gpio_set_value(AST_RESET, 0);
2344         mdelay(1);
2345         __gpio_set_value(AST_RESET, 1);
2346         mdelay(50);
2347
2348         /*
2349          * note: AFTER reset the PNAND interface is in 8 bit mode,
2350          * so if the gpmc is still configured for a 16 bit bus
2351          * the upper half of each read will be FF
2352          */
2352         pncfg_reg = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2353
2354 #ifdef PNAND_16BIT_MODE
2355
2356         /*
2357          * switch to 16 bit mode, force NON-LNA LBD mode, 3 RA addr bytes
2358          */
2359         ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0001);
2360
2361         /*
2362          * now in order to continue to talk to astoria
2363          * sw OMAP GPMC into 16 bit mode as well
2364          */
2365         cy_as_hal_gpmc_enable_16bit_bus(cy_true);
2366 #else
2367         /* Astoria and GPMC are already in 8 bit mode, just initialize PNAND_CFG */
2368         ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0000);
2369 #endif
2370
2371         /*
2372          * NOTE: if you want to capture bus activity on the LA,
2373          * don't use printks in between the activities you want to capture.
2374          * printks may take milliseconds, and the data of interest
2375          * will fall outside the LA capture window/buffer
2376          */
2377         data16[0] = ast_p_nand_casdo_read(CY_AS_MEM_CM_WB_CFG_ID);
2378         data16[1] = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
2379
2380         if (data16[0] != 0xA200) {
2381                 /*
2382                  * astoria device is not found
2383                  */
2384                 printk(KERN_ERR "ERROR: astoria device is not found, "
2385                         "CY_AS_MEM_CM_WB_CFG_ID read returned:%4.4X, "
2386                         "CY_AS_MEM_PNAND_CFG:%4.4x !\n",
2387                                 data16[0], data16[1]);
2387                 goto bus_acc_error;
2388         }
2389
2390         cy_as_hal_print_message(KERN_INFO" register access CASDO test:"
2391                                 "\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
2392                                 " PNAND_CFG after RST:%4.4x\n"
2393                                 " CY_AS_MEM_PNAND_CFG "
2394                                 "after cfg_wr:%4.4x\n\n",
2395                                 data16[0], pncfg_reg, data16[1]);
2396
2397         dev_p->thread_flag = 1;
2398         spin_lock_init(&int_lock);
2399         dev_p->m_next_p = m_omap_list_p;
2400
2401         m_omap_list_p = dev_p;
2402         *tag = dev_p;
2403
2404         cy_as_hal_configure_interrupts((void *)dev_p);
2405
2406         cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
2407                                 ", kernel HZ:%d\n", dev_p, HZ);
2408
2409         /*
2410          * make processor-to-storage endpoints SG assisted by default
2411          */
2412         cy_as_hal_set_ep_dma_mode(4, true);
2413         cy_as_hal_set_ep_dma_mode(8, true);
2414
2415         return 1;
2416
2417         /*
2418          * there's been a NAND bus access error or
2419          * astoria device is not connected
2420          */
2421 bus_acc_error:
2422         /*
2423          * at this point hal tag hasn't been set yet
2424          * so the device will not call omap_stop
2425          */
2426         cy_as_hal_omap_hardware_deinit(dev_p);
2427         cy_as_hal_free(dev_p);
2428         return 0;
2429 }
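
/*
 * A hedged sketch of how a WestBridge device driver might bring this HAL
 * up; the probe function and error handling are invented for
 * illustration (not built):
 */
#if 0
static cy_as_hal_device_tag example_hal_tag;

static int example_probe(void)
{
        /* returns 1 on success, 0 on bus access error / astoria missing */
        if (!start_o_m_a_p_kernel("cyasomap", &example_hal_tag, cy_false))
                return -ENODEV;

        /* example_hal_tag can now be handed to the WB API layer */
        return 0;
}
#endif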
2430
2431 #else
2432 /*
2433  * Some compilers do not like empty C files, so if the OMAP hal is not being
2434  * compiled, we compile this single function.  We do this so that for a
2435  * given target HAL there are not multiple sources for the HAL functions.
2436  */
2437 void my_o_m_a_p_kernel_hal_dummy_function(void)
2438 {
2439 }
2440
2441 #endif