Merge omap-upstream
author Tony Lindgren <tony@atomide.com>
Mon, 6 Aug 2007 12:44:03 +0000 (05:44 -0700)
committer Tony Lindgren <tony@atomide.com>
Mon, 6 Aug 2007 12:44:03 +0000 (05:44 -0700)
Merge branches 'master' and 'omap-upstream'

Conflicts:

arch/arm/Kconfig
arch/arm/boot/compressed/head.S
arch/arm/configs/omap_h2_1610_defconfig
arch/arm/configs/omap_osk_5912_defconfig
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/board-palmte.c
arch/arm/mach-omap1/board-palmtt.c
arch/arm/mach-omap1/board-palmz71.c
arch/arm/mach-omap1/board-sx1.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-apollon-keys.c
arch/arm/mach-omap2/board-apollon.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-n800-audio.c
arch/arm/mach-omap2/board-n800-dsp.c
arch/arm/mach-omap2/board-n800-usb.c
arch/arm/mach-omap2/board-n800.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/irq.c
arch/arm/mach-omap2/memory.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/sleep.S
arch/arm/mm/proc-v7.S
arch/arm/plat-omap/Makefile
arch/arm/plat-omap/common.c
arch/arm/plat-omap/timer32k.c
drivers/char/watchdog/omap_wdt.c
drivers/i2c/chips/Makefile
drivers/i2c/chips/menelaus.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/omap2_mcspi.c
drivers/video/omap/Kconfig
drivers/video/omap/Makefile
drivers/video/omap/blizzard.c
drivers/video/omap/dispc.c
drivers/video/omap/lcd_inn1510.c
drivers/video/omap/lcd_inn1610.c
drivers/video/omap/lcd_palmte.c
drivers/video/omap/lcd_palmtt.c
drivers/video/omap/lcd_palmz71.c
drivers/video/omap/lcd_sx1.c
drivers/video/omap/lcdc.c
drivers/video/omap/omapfb_main.c
drivers/video/omap/rfbi.c
drivers/video/omap/sossi.c
include/asm-arm/arch-omap/board-2430sdp.h
include/asm-arm/arch-omap/eac.h
include/asm-arm/arch-omap/gpio.h
include/asm-arm/arch-omap/hardware.h
include/asm-arm/arch-omap/io.h
include/asm-arm/arch-omap/menelaus.h
include/asm-arm/arch-omap/mmc.h
include/asm-arm/arch-omap/omap24xx.h
include/asm-arm/arch-omap/onenand.h
include/asm-arm/arch-omap/pm.h
include/linux/input.h
kernel/printk.c

50 files changed:
1  2 
Makefile
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/kernel/entry-common.S
arch/arm/mach-omap2/mmu.c
arch/arm/mm/Kconfig
arch/arm/plat-omap/dsp/dsp_common.c
arch/arm/plat-omap/mmu.c
arch/arm/plat-omap/timer32k.c
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
drivers/Makefile
drivers/char/watchdog/omap_wdt.c
drivers/i2c/busses/Makefile
drivers/i2c/chips/Kconfig
drivers/i2c/chips/Makefile
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/leds-omap-pwm.c
drivers/media/radio/Kconfig
drivers/media/video/Kconfig
drivers/media/video/Makefile
drivers/mmc/host/omap.c
drivers/net/irda/Kconfig
drivers/net/irda/Makefile
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/serial/8250.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/usb/Kconfig
drivers/usb/gadget/Kconfig
drivers/usb/gadget/omap_udc.c
drivers/usb/musb/g_ep0.c
drivers/usb/musb/musb_gadget.c
drivers/video/Makefile
drivers/video/backlight/Kconfig
drivers/video/omap/Makefile
drivers/video/omap/rfbi.c
drivers/video/omap/sossi.c
include/asm-arm/pgtable.h
include/asm-arm/thread_info.h
include/asm-arm/vfp.h
include/linux/i2c-id.h
kernel/printk.c
net/ipv4/netfilter/Kconfig
sound/oss/Makefile

diff --cc Makefile
Simple merge
@@@ -1050,10 -1053,8 +1066,12 @@@ source "drivers/mmc/Kconfig
  
  source "drivers/rtc/Kconfig"
  
+ source "drivers/dma/Kconfig"
 +if ARCH_OMAP
 +source "drivers/cbus/Kconfig"
 +endif
 +
  endmenu
  
  source "fs/Kconfig"
Simple merge
Simple merge
Simple merge
index 2f0e6ed,0000000..08f8995
mode 100644,000000..100644
--- /dev/null
@@@ -1,329 -1,0 +1,330 @@@
 +/*
 + * linux/arch/arm/mach-omap2/mmu.c
 + *
 + * Support for non-MPU OMAP2 MMUs.
 + *
 + * Copyright (C) 2002-2007 Nokia Corporation
 + *
 + * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 + *        and Paul Mundt <paul.mundt@nokia.com>
 + *
 + * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 + */
 +#include <linux/types.h>
 +#include <linux/init.h>
 +#include <linux/rwsem.h>
 +#include <linux/device.h>
 +#include <linux/mm.h>
 +#include <linux/interrupt.h>
++#include <linux/err.h>
 +#include "mmu.h"
 +#include <asm/arch/mmu.h>
 +#include <asm/tlbflush.h>
 +#include <asm/io.h>
 +#include <asm/sizes.h>
 +
 +static void *dspvect_page;
 +#define DSP_INIT_PAGE 0xfff000
 +
 +static inline void
 +omap2_mmu_read_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
 +{
 +      cr->cam = omap_mmu_read_reg(mmu, MMU_READ_CAM);
 +      cr->ram = omap_mmu_read_reg(mmu, MMU_READ_RAM);
 +}
 +
 +static inline void
 +omap2_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
 +{
 +      /* Set the CAM and RAM entries */
 +      omap_mmu_write_reg(mmu, cr->cam | OMAP_MMU_CAM_V, MMU_CAM);
 +      omap_mmu_write_reg(mmu, cr->ram, MMU_RAM);
 +}
 +
 +static void exmap_setup_iomap_page(struct omap_mmu *mmu, unsigned long phys,
 +                                 unsigned long dsp_io_adr, int index)
 +{
 +      unsigned long dspadr;
 +      void *virt;
 +      struct omap_mmu_tlb_entry tlb_ent;
 +
 +      dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
 +      virt = omap_mmu_to_virt(mmu, dspadr);
 +      exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
 +      INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, NULL, virt);
 +      INIT_TLB_ENTRY_4KB_ES32_PRESERVED(&tlb_ent, dspadr, phys);
 +      omap_mmu_load_pte_entry(mmu, &tlb_ent);
 +}
 +
 +static void exmap_clear_iomap_page(struct omap_mmu *mmu,
 +                                 unsigned long dsp_io_adr)
 +{
 +      unsigned long dspadr;
 +      void *virt;
 +
 +      dspadr = (IOMAP_VAL << 18) + (dsp_io_adr << 1);
 +      virt = omap_mmu_to_virt(mmu, dspadr);
 +      exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
 +      /* DSP MMU is shutting down. not handled here. */
 +}
 +
 +#define OMAP24XX_MAILBOX_BASE (L4_24XX_BASE + 0x94000)
 +#define OMAP2420_GPT5_BASE    (L4_24XX_BASE + 0x7c000)
 +#define OMAP2420_GPT6_BASE    (L4_24XX_BASE + 0x7e000)
 +#define OMAP2420_GPT7_BASE    (L4_24XX_BASE + 0x80000)
 +#define OMAP2420_GPT8_BASE    (L4_24XX_BASE + 0x82000)
 +#define OMAP24XX_EAC_BASE     (L4_24XX_BASE + 0x90000)
 +#define OMAP24XX_STI_BASE     (L4_24XX_BASE + 0x68000)
 +#define OMAP24XX_STI_CH_BASE  (L4_24XX_BASE + 0x0c000000)
 +
 +static int exmap_setup_preserved_entries(struct omap_mmu *mmu)
 +{
 +      int i, n = 0;
 +
 +      exmap_setup_preserved_mem_page(mmu, dspvect_page, DSP_INIT_PAGE, n++);
 +
 +      /* REVISIT: This will need to be revisited for 3430 */
 +      exmap_setup_iomap_page(mmu, OMAP2_PRCM_BASE, 0x7000, n++);
 +      exmap_setup_iomap_page(mmu, OMAP24XX_MAILBOX_BASE, 0x11000, n++);
 +
 +      if (cpu_is_omap2420()) {
 +              exmap_setup_iomap_page(mmu, OMAP2420_GPT5_BASE, 0xe000, n++);
 +              exmap_setup_iomap_page(mmu, OMAP2420_GPT6_BASE, 0xe800, n++);
 +              exmap_setup_iomap_page(mmu, OMAP2420_GPT7_BASE, 0xf000, n++);
 +              exmap_setup_iomap_page(mmu, OMAP2420_GPT8_BASE, 0xf800, n++);
 +              exmap_setup_iomap_page(mmu, OMAP24XX_EAC_BASE,  0x10000, n++);
 +              exmap_setup_iomap_page(mmu, OMAP24XX_STI_BASE, 0xc800, n++);
 +              for (i = 0; i < 5; i++)
 +                      exmap_setup_preserved_mem_page(mmu,
 +                              __va(OMAP24XX_STI_CH_BASE + i*SZ_4K),
 +                              0xfb0000 + i*SZ_4K, n++);
 +      }
 +
 +      return n;
 +}
 +
 +static void exmap_clear_preserved_entries(struct omap_mmu *mmu)
 +{
 +      int i;
 +
 +      exmap_clear_iomap_page(mmu, 0x7000);    /* PRCM registers */
 +      exmap_clear_iomap_page(mmu, 0x11000);   /* MAILBOX registers */
 +
 +      if (cpu_is_omap2420()) {
 +              exmap_clear_iomap_page(mmu, 0xe000);    /* GPT5 */
 +              exmap_clear_iomap_page(mmu, 0xe800);    /* GPT6 */
 +              exmap_clear_iomap_page(mmu, 0xf000);    /* GPT7 */
 +              exmap_clear_iomap_page(mmu, 0xf800);    /* GPT8 */
 +              exmap_clear_iomap_page(mmu, 0x10000);   /* EAC */
 +              exmap_clear_iomap_page(mmu, 0xc800);    /* STI */
 +              for (i = 0; i < 5; i++)                 /* STI CH */
 +                      exmap_clear_mem_page(mmu, 0xfb0000 + i*SZ_4K);
 +      }
 +
 +      exmap_clear_mem_page(mmu, DSP_INIT_PAGE);
 +}
 +
 +#define MMU_IRQ_MASK \
 +      (OMAP_MMU_IRQ_MULTIHITFAULT | \
 +       OMAP_MMU_IRQ_TABLEWALKFAULT | \
 +       OMAP_MMU_IRQ_EMUMISS | \
 +       OMAP_MMU_IRQ_TRANSLATIONFAULT)
 +
 +static int omap2_mmu_startup(struct omap_mmu *mmu)
 +{
 +      dspvect_page = (void *)__get_dma_pages(GFP_KERNEL, 0);
 +      if (dspvect_page == NULL) {
 +              printk(KERN_ERR "MMU: failed to allocate memory "
 +                              "for dsp vector table\n");
 +              return -ENOMEM;
 +      }
 +
 +      mmu->nr_exmap_preserved = exmap_setup_preserved_entries(mmu);
 +
 +      omap_mmu_write_reg(mmu, MMU_IRQ_MASK, MMU_IRQENABLE);
 +
 +      return 0;
 +}
 +
 +static void omap2_mmu_shutdown(struct omap_mmu *mmu)
 +{
 +      exmap_clear_preserved_entries(mmu);
 +
 +      if (dspvect_page != NULL) {
 +              unsigned long virt;
 +
 +              down_read(&mmu->exmap_sem);
 +
 +              virt = (unsigned long)omap_mmu_to_virt(mmu, DSP_INIT_PAGE);
 +              flush_tlb_kernel_range(virt, virt + PAGE_SIZE);
 +              free_page((unsigned long)dspvect_page);
 +              dspvect_page = NULL;
 +
 +              up_read(&mmu->exmap_sem);
 +      }
 +}
 +
 +static ssize_t omap2_mmu_show(struct omap_mmu *mmu, char *buf,
 +                            struct omap_mmu_tlb_lock *tlb_lock)
 +{
 +      int i, len;
 +
 +      len = sprintf(buf, "P: preserved, V: valid\n"
 +                         "B: big endian, L:little endian, "
 +                         "M: mixed page attribute\n"
 +                         "ety P V size   cam_va     ram_pa E ES M\n");
 +                       /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
 +
 +      for (i = 0; i < mmu->nr_tlb_entries; i++) {
 +              struct omap_mmu_tlb_entry ent;
 +              struct cam_ram_regset cr;
 +              struct omap_mmu_tlb_lock entry_lock;
 +              char *pgsz_str, *elsz_str;
 +
 +              /* read a TLB entry */
 +              entry_lock.base   = tlb_lock->base;
 +              entry_lock.victim = i;
 +              omap_mmu_read_tlb(mmu, &entry_lock, &cr);
 +
 +              ent.pgsz   = cr.cam & OMAP_MMU_CAM_PAGESIZE_MASK;
 +              ent.prsvd  = cr.cam & OMAP_MMU_CAM_P;
 +              ent.valid  = cr.cam & OMAP_MMU_CAM_V;
 +              ent.va     = cr.cam & OMAP_MMU_CAM_VATAG_MASK;
 +              ent.endian = cr.ram & OMAP_MMU_RAM_ENDIANNESS;
 +              ent.elsz   = cr.ram & OMAP_MMU_RAM_ELEMENTSIZE_MASK;
 +              ent.pa     = cr.ram & OMAP_MMU_RAM_PADDR_MASK;
 +              ent.mixed  = cr.ram & OMAP_MMU_RAM_MIXED;
 +
 +              pgsz_str = (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_16MB) ? "16MB":
 +                         (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_1MB)  ? " 1MB":
 +                         (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_64KB) ? "64KB":
 +                         (ent.pgsz == OMAP_MMU_CAM_PAGESIZE_4KB)  ? " 4KB":
 +                                                                   " ???";
 +              elsz_str = (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_8)  ? " 8":
 +                         (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_16) ? "16":
 +                         (ent.elsz == OMAP_MMU_RAM_ELEMENTSIZE_32) ? "32":
 +                                                                    "??";
 +
 +              if (i == tlb_lock->base)
 +                      len += sprintf(buf + len, "lock base = %d\n",
 +                                     tlb_lock->base);
 +              if (i == tlb_lock->victim)
 +                      len += sprintf(buf + len, "victim    = %d\n",
 +                                     tlb_lock->victim);
 +
 +              len += sprintf(buf + len,
 +                             /* 00: P V  4KB 0x300000 0x10171800 B 16 M */
 +                             "%02d: %c %c %s 0x%06lx 0x%08lx %c %s %c\n",
 +                             i,
 +                             ent.prsvd ? 'P' : ' ',
 +                             ent.valid ? 'V' : ' ',
 +                             pgsz_str, ent.va, ent.pa,
 +                             ent.endian ? 'B' : 'L',
 +                             elsz_str,
 +                             ent.mixed ? 'M' : ' ');
 +      }
 +
 +      return len;
 +}
 +
 +#define get_cam_va_mask(pgsz) \
 +      (((pgsz) == OMAP_MMU_CAM_PAGESIZE_16MB) ? 0xff000000 : \
 +       ((pgsz) == OMAP_MMU_CAM_PAGESIZE_1MB)  ? 0xfff00000 : \
 +       ((pgsz) == OMAP_MMU_CAM_PAGESIZE_64KB) ? 0xffff0000 : \
 +       ((pgsz) == OMAP_MMU_CAM_PAGESIZE_4KB)  ? 0xfffff000 : 0)
 +
 +static inline unsigned long omap2_mmu_cam_va(struct cam_ram_regset *cr)
 +{
 +      unsigned int page_size = cr->cam & OMAP_MMU_CAM_PAGESIZE_MASK;
 +      unsigned int mask = get_cam_va_mask(cr->cam & page_size);
 +
 +      return cr->cam & mask;
 +}
 +
 +static struct cam_ram_regset *
 +omap2_mmu_cam_ram_alloc(struct omap_mmu_tlb_entry *entry)
 +{
 +      struct cam_ram_regset *cr;
 +
 +      if (entry->va & ~(get_cam_va_mask(entry->pgsz))) {
 +              printk(KERN_ERR "MMU: mapping vadr (0x%06lx) is not on an "
 +                     "aligned boundary\n", entry->va);
 +              return ERR_PTR(-EINVAL);
 +      }
 +
 +      cr = kmalloc(sizeof(struct cam_ram_regset), GFP_KERNEL);
 +
 +      cr->cam = (entry->va & OMAP_MMU_CAM_VATAG_MASK) |
 +                entry->prsvd | entry->pgsz;
 +      cr->ram = entry->pa | entry->endian | entry->elsz;
 +
 +      return cr;
 +}
 +
 +static inline int omap2_mmu_cam_ram_valid(struct cam_ram_regset *cr)
 +{
 +      return cr->cam & OMAP_MMU_CAM_V;
 +}
 +
 +static void omap2_mmu_interrupt(struct omap_mmu *mmu)
 +{
 +      unsigned long status, va;
 +
 +      status = MMU_IRQ_MASK & omap_mmu_read_reg(mmu, MMU_IRQSTATUS);
 +      va = omap_mmu_read_reg(mmu, MMU_FAULT_AD);
 +
 +      pr_info("%s\n", (status & OMAP_MMU_IRQ_MULTIHITFAULT)           ? "multi hit":"");
 +      pr_info("%s\n", (status & OMAP_MMU_IRQ_TABLEWALKFAULT)          ? "table walk fault":"");
 +      pr_info("%s\n", (status & OMAP_MMU_IRQ_EMUMISS)                 ? "EMU miss":"");
 +      pr_info("%s\n", (status & OMAP_MMU_IRQ_TRANSLATIONFAULT)        ? "translation fault":"");
 +      pr_info("%s\n", (status & OMAP_MMU_IRQ_TLBMISS)                 ? "TLB miss":"");
 +      pr_info("fault address = %#08lx\n", va);
 +
 +      omap_mmu_disable(mmu);
 +      omap_mmu_write_reg(mmu, status, MMU_IRQSTATUS);
 +
 +      mmu->fault_address = va;
 +      schedule_work(&mmu->irq_work);
 +}
 +
 +static pgprot_t omap2_mmu_pte_get_attr(struct omap_mmu_tlb_entry *entry)
 +{
 +      u32 attr;
 +
 +      attr = entry->mixed << 5;
 +      attr |= entry->endian;
 +      attr |= entry->elsz >> 3;
 +      attr <<= ((entry->pgsz & OMAP_MMU_CAM_PAGESIZE_4KB) ? 0:6);
 +
 +      return attr;
 +}
 +
 +struct omap_mmu_ops omap2_mmu_ops = {
 +      .startup        = omap2_mmu_startup,
 +      .shutdown       = omap2_mmu_shutdown,
 +      .read_tlb       = omap2_mmu_read_tlb,
 +      .load_tlb       = omap2_mmu_load_tlb,
 +      .show           = omap2_mmu_show,
 +      .cam_va         = omap2_mmu_cam_va,
 +      .cam_ram_alloc  = omap2_mmu_cam_ram_alloc,
 +      .cam_ram_valid  = omap2_mmu_cam_ram_valid,
 +      .interrupt      = omap2_mmu_interrupt,
 +      .pte_get_attr   = omap2_mmu_pte_get_attr,
 +};
 +EXPORT_SYMBOL_GPL(omap2_mmu_ops);
 +
 +MODULE_LICENSE("GPL");
@@@ -359,10 -345,10 +359,11 @@@ config CPU_XSC
  # ARMv6
  config CPU_V6
        bool "Support ARM V6 processor"
-       depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2
+       depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3
+       default y if ARCH_MX3
        select CPU_32v6
        select CPU_ABRT_EV6
 +      select CPU_PABRT_NOIFAR
        select CPU_CACHE_V6
        select CPU_CACHE_VIPT
        select CPU_CP15_MMU
index 633592a,0000000..b7a7ed3
mode 100644,000000..100644
--- /dev/null
@@@ -1,624 -1,0 +1,625 @@@
 +/*
 + * This file is part of OMAP DSP driver (DSP Gateway version 3.3.1)
 + *
 + * Copyright (C) 2002-2006 Nokia Corporation. All rights reserved.
 + *
 + * Contact: Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * version 2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 + * 02110-1301 USA
 + *
 + */
 +
 +#include <linux/module.h>
 +#include <linux/init.h>
 +#include <linux/sched.h>
 +#include <linux/delay.h>
 +#include <linux/mm.h>
++#include <linux/err.h>
 +#include <linux/clk.h>
 +#include <linux/mutex.h>
 +#include <linux/interrupt.h>
 +#include <asm/io.h>
 +#include <asm/tlbflush.h>
 +#include <asm/irq.h>
 +#ifdef CONFIG_ARCH_OMAP1
 +#include <asm/arch/tc.h>
 +#endif
 +#include "dsp_common.h"
 +
 +#if defined(CONFIG_ARCH_OMAP1)
 +#define dsp_boot_config(mode) omap_writew((mode), MPUI_DSP_BOOT_CONFIG)
 +#endif
 +#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
 +#define dsp_boot_config(mode) writel((mode), DSP_IPI_DSPBOOTCONFIG)
 +#endif
 +
 +struct omap_dsp *omap_dsp;
 +
 +#if defined(CONFIG_ARCH_OMAP1)
 +struct clk *dsp_ck_handle;
 +struct clk *api_ck_handle;
 +#elif defined(CONFIG_ARCH_OMAP2)
 +struct clk *dsp_fck_handle;
 +struct clk *dsp_ick_handle;
 +#endif
 +dsp_long_t dspmem_base, dspmem_size,
 +         daram_base, daram_size,
 +         saram_base, saram_size;
 +
 +static struct cpustat {
 +      struct mutex lock;
 +      enum cpustat_e stat;
 +      enum cpustat_e req;
 +      u16 icrmask;
 +#ifdef CONFIG_ARCH_OMAP1
 +      struct {
 +              int mpui;
 +              int mem;
 +              int mem_delayed;
 +      } usecount;
 +      int (*mem_req_cb)(void);
 +      void (*mem_rel_cb)(void);
 +#endif
 +} cpustat = {
 +      .stat = CPUSTAT_RESET,
 +      .icrmask = 0xffff,
 +};
 +
 +int dsp_set_rstvect(dsp_long_t adr)
 +{
 +      unsigned long *dst_adr;
 +
 +      if (adr >= DSPSPACE_SIZE)
 +              return -EINVAL;
 +
 +      dst_adr = dspbyte_to_virt(DSP_BOOT_ADR_DIRECT);
 +      /* word swap */
 +      *dst_adr = ((adr & 0xffff) << 16) | (adr >> 16);
 +      /* fill 8 bytes! */
 +      *(dst_adr + 1) = 0;
 +      /* direct boot */
 +      dsp_boot_config(DSP_BOOT_CONFIG_DIRECT);
 +
 +      return 0;
 +}
 +
 +dsp_long_t dsp_get_rstvect(void)
 +{
 +      unsigned long *dst_adr;
 +
 +      dst_adr = dspbyte_to_virt(DSP_BOOT_ADR_DIRECT);
 +      return ((*dst_adr & 0xffff) << 16) | (*dst_adr >> 16);
 +}
 +
 +#ifdef CONFIG_ARCH_OMAP1
 +static void simple_load_code(unsigned char *src_c, u16 *dst, int len)
 +{
 +      int i;
 +      u16 *src = (u16 *)src_c;
 +      int len_w;
 +
 +      /* len must be multiple of 2. */
 +      if (len & 1)
 +              BUG();
 +
 +      len_w = len / 2;
 +      for (i = 0; i < len_w; i++) {
 +              /* byte swap copy */
 +              *dst = ((*src & 0x00ff) << 8) |
 +                     ((*src & 0xff00) >> 8);
 +              src++;
 +              dst++;
 +      }
 +}
 +
 +/* program size must be multiple of 2 */
 +#define GBL_IDLE_TEXT_SIZE    52
 +#define GBL_IDLE_TEXT_INIT { \
 +      /* SAM */ \
 +      0x3c, 0x4a,                     /* 0x3c4a:     MOV 0x4, AR2 */ \
 +      0xf4, 0x41, 0xfc, 0xff,         /* 0xf441fcff: AND 0xfcff, *AR2 */ \
 +      /* disable WDT */ \
 +      0x76, 0x34, 0x04, 0xb8,         /* 0x763404b8: MOV 0x3404, AR3 */ \
 +      0xfb, 0x61, 0x00, 0xf5,         /* 0xfb6100f5: MOV 0x00f5, *AR3 */ \
 +      0x9a,                           /* 0x9a:       PORT */ \
 +      0xfb, 0x61, 0x00, 0xa0,         /* 0xfb6100a0: MOV 0x00a0, *AR3 */ \
 +      0x9a,                           /* 0x9a:       PORT */ \
 +      /* *IER0 = 0, *IER1 = 0 */ \
 +      0x3c, 0x0b,                     /* 0x3c0b:     MOV 0x0, AR3 */ \
 +      0xe6, 0x61, 0x00,               /* 0xe66100:   MOV 0, *AR3 */ \
 +      0x76, 0x00, 0x45, 0xb8,         /* 0x760045b8: MOV 0x45, AR3 */ \
 +      0xe6, 0x61, 0x00,               /* 0xe66100:   MOV 0, *AR3 */ \
 +      /* *ICR = 0xffff */ \
 +      0x3c, 0x1b,                     /* 0x3c1b:     MOV 0x1, AR3 */ \
 +      0xfb, 0x61, 0xff, 0xff,         /* 0xfb61ffff: MOV 0xffff, *AR3 */ \
 +      0x9a,                           /* 0x9a:       PORT */ \
 +      /* HOM */ \
 +      0xf5, 0x41, 0x03, 0x00,         /* 0xf5410300: OR 0x0300, *AR2 */ \
 +      /* idle and loop forever */ \
 +      0x7a, 0x00, 0x00, 0x0c,         /* 0x7a00000c: IDLE */ \
 +      0x4a, 0x7a,                     /* 0x4a7a:     B -6 (infinite loop) */ \
 +      0x20, 0x20, 0x20,               /* 0x20:       NOP */ \
 +}
 +
 +/* program size must be multiple of 2 */
 +#define CPU_IDLE_TEXT_SIZE    48
 +#define CPU_IDLE_TEXT_INIT(icrh, icrl) { \
 +      /* SAM */ \
 +      0x3c, 0x4b,                     /* 0x3c4b:     MOV 0x4, AR3 */ \
 +      0xf4, 0x61, 0xfc, 0xff,         /* 0xf461fcff: AND 0xfcff, *AR3 */ \
 +      /* disable WDT */ \
 +      0x76, 0x34, 0x04, 0xb8,         /* 0x763404b8: MOV 0x3404, AR3 */ \
 +      0xfb, 0x61, 0x00, 0xf5,         /* 0xfb6100f5: MOV 0x00f5, *AR3 */ \
 +      0x9a,                           /* 0x9a:       PORT */ \
 +      0xfb, 0x61, 0x00, 0xa0,         /* 0xfb6100a0: MOV 0x00a0, *AR3 */ \
 +      0x9a,                           /* 0x9a:       PORT */ \
 +      /* *IER0 = 0, *IER1 = 0 */ \
 +      0x3c, 0x0b,                     /* 0x3c0b:     MOV 0x0, AR3 */ \
 +      0xe6, 0x61, 0x00,               /* 0xe66100:   MOV 0, *AR3 */ \
 +      0x76, 0x00, 0x45, 0xb8,         /* 0x760045b8: MOV 0x45, AR3 */ \
 +      0xe6, 0x61, 0x00,               /* 0xe66100:   MOV 0, *AR3 */ \
 +      /* set ICR = icr */ \
 +      0x3c, 0x1b,                     /* 0x3c1b:     MOV 0x1, AR3 */ \
 +      0xfb, 0x61, (icrh), (icrl),     /* 0xfb61****: MOV icr, *AR3 */ \
 +      0x9a,                           /* 0x9a:       PORT */ \
 +      /* idle and loop forever */ \
 +      0x7a, 0x00, 0x00, 0x0c,         /* 0x7a00000c: IDLE */ \
 +      0x4a, 0x7a,                     /* 0x4a7a:     B -6 (infinite loop) */ \
 +      0x20, 0x20, 0x20                /* 0x20: nop */ \
 +}
 +
 +/*
 + * idle_boot base:
 + * Initialized with DSP_BOOT_ADR_MPUI (=0x010000).
 + * This value is used before DSP Gateway driver is initialized.
 + * DSP Gateway driver will overwrite this value with other value,
 + * to avoid conflicts with the user program.
 + */
 +static dsp_long_t idle_boot_base = DSP_BOOT_ADR_MPUI;
 +
 +static void dsp_gbl_idle(void)
 +{
 +      unsigned char idle_text[GBL_IDLE_TEXT_SIZE] = GBL_IDLE_TEXT_INIT;
 +
 +      __dsp_reset();
 +      clk_enable(api_ck_handle);
 +
 +#if 0
 +      dsp_boot_config(DSP_BOOT_CONFIG_IDLE);
 +#endif
 +      simple_load_code(idle_text, dspbyte_to_virt(idle_boot_base),
 +                       GBL_IDLE_TEXT_SIZE);
 +      if (idle_boot_base == DSP_BOOT_ADR_MPUI)
 +              dsp_boot_config(DSP_BOOT_CONFIG_MPUI);
 +      else
 +              dsp_set_rstvect(idle_boot_base);
 +
 +      __dsp_run();
 +      udelay(100);    /* to make things stable */
 +      clk_disable(api_ck_handle);
 +}
 +
 +static void dsp_cpu_idle(void)
 +{
 +      u16 icr_tmp;
 +      unsigned char icrh, icrl;
 +
 +      __dsp_reset();
 +      clk_enable(api_ck_handle);
 +
 +      /*
 +       * icr settings:
 +       * DMA should not sleep for DARAM/SARAM access
 +       * DPLL should not sleep while any other domain is active
 +       */
 +      icr_tmp = cpustat.icrmask & ~(DSPREG_ICR_DMA | DSPREG_ICR_DPLL);
 +      icrh = icr_tmp >> 8;
 +      icrl = icr_tmp & 0xff;
 +      {
 +              unsigned char idle_text[CPU_IDLE_TEXT_SIZE] = CPU_IDLE_TEXT_INIT(icrh, icrl);
 +              simple_load_code(idle_text, dspbyte_to_virt(idle_boot_base),
 +                               CPU_IDLE_TEXT_SIZE);
 +      }
 +      if (idle_boot_base == DSP_BOOT_ADR_MPUI)
 +              dsp_boot_config(DSP_BOOT_CONFIG_MPUI);
 +      else
 +              dsp_set_rstvect(idle_boot_base);
 +      __dsp_run();
 +      udelay(100);    /* to make things stable */
 +      clk_disable(api_ck_handle);
 +}
 +
 +void dsp_set_idle_boot_base(dsp_long_t adr, size_t size)
 +{
 +      if (adr == idle_boot_base)
 +              return;
 +      idle_boot_base = adr;
 +      if ((size < GBL_IDLE_TEXT_SIZE) ||
 +          (size < CPU_IDLE_TEXT_SIZE)) {
 +              printk(KERN_ERR
 +                     "omapdsp: size for idle program is not enough!\n");
 +              BUG();
 +      }
 +
 +      /* restart idle program with new base address */
 +      if (cpustat.stat == CPUSTAT_GBL_IDLE)
 +              dsp_gbl_idle();
 +      if (cpustat.stat == CPUSTAT_CPU_IDLE)
 +              dsp_cpu_idle();
 +}
 +
 +void dsp_reset_idle_boot_base(void)
 +{
 +      idle_boot_base = DSP_BOOT_ADR_MPUI;
 +}
 +#else
 +void dsp_reset_idle_boot_base(void) { }
 +#endif /* CONFIG_ARCH_OMAP1 */
 +
 +static int init_done;
 +
 +static int omap_dsp_init(void)
 +{
 +      mutex_init(&cpustat.lock);
 +
 +      dspmem_size = 0;
 +#ifdef CONFIG_ARCH_OMAP15XX
 +      if (cpu_is_omap15xx()) {
 +              dspmem_base = OMAP1510_DSP_BASE;
 +              dspmem_size = OMAP1510_DSP_SIZE;
 +              daram_base = OMAP1510_DARAM_BASE;
 +              daram_size = OMAP1510_DARAM_SIZE;
 +              saram_base = OMAP1510_SARAM_BASE;
 +              saram_size = OMAP1510_SARAM_SIZE;
 +      }
 +#endif
 +#ifdef CONFIG_ARCH_OMAP16XX
 +      if (cpu_is_omap16xx()) {
 +              dspmem_base = OMAP16XX_DSP_BASE;
 +              dspmem_size = OMAP16XX_DSP_SIZE;
 +              daram_base = OMAP16XX_DARAM_BASE;
 +              daram_size = OMAP16XX_DARAM_SIZE;
 +              saram_base = OMAP16XX_SARAM_BASE;
 +              saram_size = OMAP16XX_SARAM_SIZE;
 +      }
 +#endif
 +#ifdef CONFIG_ARCH_OMAP24XX
 +      if (cpu_is_omap24xx()) {
 +              dspmem_base = DSP_MEM_24XX_VIRT;
 +              dspmem_size = DSP_MEM_24XX_SIZE;
 +              daram_base = OMAP24XX_DARAM_BASE;
 +              daram_size = OMAP24XX_DARAM_SIZE;
 +              saram_base = OMAP24XX_SARAM_BASE;
 +              saram_size = OMAP24XX_SARAM_SIZE;
 +      }
 +#endif
 +#ifdef CONFIG_ARCH_OMAP34XX
 +      /* To be Revisited for 3430 */
 +      if (cpu_is_omap34xx()) {
 +              return -ENODEV;
 +      }
 +#endif
 +      if (dspmem_size == 0) {
 +              printk(KERN_ERR "omapdsp: unsupported omap architecture.\n");
 +              return -ENODEV;
 +      }
 +
 +#if defined(CONFIG_ARCH_OMAP1)
 +      dsp_ck_handle = clk_get(NULL, "dsp_ck");
 +      if (IS_ERR(dsp_ck_handle)) {
 +              printk(KERN_ERR "omapdsp: could not acquire dsp_ck handle.\n");
 +              return PTR_ERR(dsp_ck_handle);
 +      }
 +
 +      api_ck_handle = clk_get(NULL, "api_ck");
 +      if (IS_ERR(api_ck_handle)) {
 +              printk(KERN_ERR "omapdsp: could not acquire api_ck handle.\n");
 +              if (dsp_ck_handle != NULL)
 +                      clk_put(dsp_ck_handle);
 +              return PTR_ERR(api_ck_handle);
 +      }
 +
 +      /* This is needed for McBSP init, released in late_initcall */
 +      clk_enable(api_ck_handle);
 +
 +      __dsp_enable();
 +      mpui_byteswap_off();
 +      mpui_wordswap_on();
 +      tc_wordswap();
 +#elif defined(CONFIG_ARCH_OMAP2)
 +      dsp_fck_handle = clk_get(NULL, "dsp_fck");
 +      if (IS_ERR(dsp_fck_handle)) {
 +              printk(KERN_ERR "omapdsp: could not acquire dsp_fck handle.\n");
 +              return PTR_ERR(dsp_fck_handle);
 +      }
 +
 +      dsp_ick_handle = clk_get(NULL, "dsp_ick");
 +      if (IS_ERR(dsp_ick_handle)) {
 +              printk(KERN_ERR "omapdsp: could not acquire dsp_ick handle.\n");
 +              if (dsp_fck_handle != NULL)
 +                      clk_put(dsp_fck_handle);
 +              return PTR_ERR(dsp_ick_handle);
 +      }
 +#endif
 +
 +      init_done = 1;
 +      pr_info("omap_dsp_init() done\n");
 +      return 0;
 +}
 +
 +#if defined(CONFIG_ARCH_OMAP1)
 +static int __dsp_late_init(void)
 +{
 +      clk_disable(api_ck_handle);
 +      return 0;
 +}
 +late_initcall(__dsp_late_init);
 +#endif
 +
 +static void dsp_cpustat_update(void)
 +{
 +      if (!init_done)
 +              omap_dsp_init();
 +
 +      if (cpustat.req == CPUSTAT_RUN) {
 +              if (cpustat.stat < CPUSTAT_RUN) {
 +#if defined(CONFIG_ARCH_OMAP1)
 +                      __dsp_reset();
 +                      clk_enable(api_ck_handle);
 +                      udelay(10);
 +                      __dsp_run();
 +#elif defined(CONFIG_ARCH_OMAP2)
 +                      __dsp_core_disable();
 +                      udelay(10);
 +                      __dsp_core_enable();
 +#endif
 +                      cpustat.stat = CPUSTAT_RUN;
 +              }
 +              return;
 +      }
 +
 +      /* cpustat.req < CPUSTAT_RUN */
 +
 +      if (cpustat.stat == CPUSTAT_RUN) {
 +#ifdef CONFIG_ARCH_OMAP1
 +              clk_disable(api_ck_handle);
 +#endif
 +      }
 +
 +#ifdef CONFIG_ARCH_OMAP1
 +      /*
 +       * (1) when ARM wants DARAM access, MPUI should be SAM and
 +       *     DSP needs to be on.
 +       * (2) if any bits of icr is masked, we can not enter global idle.
 +       */
 +      if ((cpustat.req == CPUSTAT_CPU_IDLE) ||
 +          (cpustat.usecount.mem > 0) ||
 +          (cpustat.usecount.mem_delayed > 0) ||
 +          ((cpustat.usecount.mpui > 0) && (cpustat.icrmask != 0xffff))) {
 +              if (cpustat.stat != CPUSTAT_CPU_IDLE) {
 +                      dsp_cpu_idle();
 +                      cpustat.stat = CPUSTAT_CPU_IDLE;
 +              }
 +              return;
 +      }
 +
 +      /*
 +       * when ARM only needs MPUI access, MPUI can be HOM and
 +       * DSP can be idling.
 +       */
 +      if ((cpustat.req == CPUSTAT_GBL_IDLE) ||
 +          (cpustat.usecount.mpui > 0)) {
 +              if (cpustat.stat != CPUSTAT_GBL_IDLE) {
 +                      dsp_gbl_idle();
 +                      cpustat.stat = CPUSTAT_GBL_IDLE;
 +              }
 +              return;
 +      }
 +#endif /* CONFIG_ARCH_OMAP1 */
 +
 +      /*
 +       * no user, no request
 +       */
 +      if (cpustat.stat != CPUSTAT_RESET) {
 +#if defined(CONFIG_ARCH_OMAP1)
 +              __dsp_reset();
 +#elif defined(CONFIG_ARCH_OMAP2)
 +              __dsp_core_disable();
 +#endif
 +              cpustat.stat = CPUSTAT_RESET;
 +      }
 +}
 +
 +/*
 + * Record the requested DSP power/run state and apply it immediately.
 + * Serialized against all other cpustat updates by cpustat.lock.
 + */
 +void dsp_cpustat_request(enum cpustat_e req)
 +{
 +      mutex_lock(&cpustat.lock);
 +      cpustat.req = req;
 +      dsp_cpustat_update();
 +      mutex_unlock(&cpustat.lock);
 +}
 +
 +/*
 + * Return the current DSP state.  Reads without taking cpustat.lock,
 + * so a concurrent update may yield a momentarily stale value.
 + */
 +enum cpustat_e dsp_cpustat_get_stat(void)
 +{
 +      return cpustat.stat;
 +}
 +
 +/* Return the cached ICR mask (lockless read; may race with a setter). */
 +u16 dsp_cpustat_get_icrmask(void)
 +{
 +      return cpustat.icrmask;
 +}
 +
 +/*
 + * Update the ICR mask and re-evaluate the DSP state: the mask feeds
 + * into the global-idle decision made by dsp_cpustat_update().
 + */
 +void dsp_cpustat_set_icrmask(u16 mask)
 +{
 +      mutex_lock(&cpustat.lock);
 +      cpustat.icrmask = mask;
 +      dsp_cpustat_update();
 +      mutex_unlock(&cpustat.lock);
 +}
 +
 +#ifdef CONFIG_ARCH_OMAP1
 +/*
 + * Take an MPUI usage reference.  Only the 0 -> 1 transition needs to
 + * re-evaluate the DSP state; further nested requests are counted only.
 + */
 +void omap_dsp_request_mpui(void)
 +{
 +      mutex_lock(&cpustat.lock);
 +      if (cpustat.usecount.mpui++ == 0)
 +              dsp_cpustat_update();
 +      mutex_unlock(&cpustat.lock);
 +}
 +
 +/*
 + * Drop an MPUI usage reference.  An underflow (release without a
 + * matching request) is reported and the counter clamped back to zero
 + * rather than being allowed to go negative.
 + */
 +void omap_dsp_release_mpui(void)
 +{
 +      mutex_lock(&cpustat.lock);
 +      if (cpustat.usecount.mpui-- == 0) {
 +              printk(KERN_ERR
 +                     "omapdsp: unbalanced mpui request/release detected.\n"
 +                     "         cpustat.usecount.mpui is going to be "
 +                     "less than zero! ... fixed to be zero.\n");
 +              cpustat.usecount.mpui = 0;
 +      }
 +      if (cpustat.usecount.mpui == 0)
 +              dsp_cpustat_update();
 +      mutex_unlock(&cpustat.lock);
 +}
 +
 +/*
 + * Take a DSP-memory usage reference.  On the first user (and only when
 + * no delayed release is still pending) the registered mem_req_cb is
 + * invoked; if it fails, the reference is rolled back and its error code
 + * returned.  Returns 0 on success.
 + */
 +int omap_dsp_request_mem(void)
 +{
 +      int ret = 0;
 +
 +      mutex_lock(&cpustat.lock);
 +      if ((cpustat.usecount.mem++ == 0) &&
 +          (cpustat.usecount.mem_delayed == 0)) {
 +              if (cpustat.mem_req_cb) {
 +                      if ((ret = cpustat.mem_req_cb()) < 0) {
 +                              cpustat.usecount.mem--;
 +                              goto out;
 +                      }
 +              }
 +              dsp_cpustat_update();
 +      }
 +out:
 +      mutex_unlock(&cpustat.lock);
 +
 +      return ret;
 +}
 +
 +/*
 + * release_mem will be delayed.
 + */
 +/*
 + * Delayed-work handler for the deferred memory release: clear the
 + * "release pending" flag and, if no new user appeared in the meantime,
 + * actually power down and notify via mem_rel_cb.
 + */
 +static void do_release_mem(struct work_struct *dummy)
 +{
 +      mutex_lock(&cpustat.lock);
 +      cpustat.usecount.mem_delayed = 0;
 +      if (cpustat.usecount.mem == 0) {
 +              dsp_cpustat_update();
 +              if (cpustat.mem_rel_cb)
 +                      cpustat.mem_rel_cb();
 +      }
 +      mutex_unlock(&cpustat.lock);
 +}
 +
 +static DECLARE_DELAYED_WORK(mem_rel_work, do_release_mem);
 +
 +/*
 + * Drop a DSP-memory usage reference.  The actual release is deferred by
 + * one HZ via mem_rel_work so that a quick re-request does not bounce the
 + * memory off and back on.  Underflow is reported and clamped to zero.
 + */
 +int omap_dsp_release_mem(void)
 +{
 +      mutex_lock(&cpustat.lock);
 +
 +      /* cancel previous release work */
 +      cancel_delayed_work(&mem_rel_work);
 +      cpustat.usecount.mem_delayed = 0;
 +
 +      if (cpustat.usecount.mem-- == 0) {
 +              printk(KERN_ERR
 +                     "omapdsp: unbalanced memory request/release detected.\n"
 +                     "         cpustat.usecount.mem is going to be "
 +                     "less than zero! ... fixed to be zero.\n");
 +              cpustat.usecount.mem = 0;
 +      }
 +      if (cpustat.usecount.mem == 0) {
 +              cpustat.usecount.mem_delayed = 1;
 +              schedule_delayed_work(&mem_rel_work, HZ);
 +      }
 +
 +      mutex_unlock(&cpustat.lock);
 +
 +      return 0;
 +}
 +
 +/*
 + * Register the memory request/release callbacks.  The caller must hold
 + * an active memory reference (usecount.mem > 0) while registering, so
 + * that req_cb is not retroactively owed a call — enforced by the BUG_ON.
 + */
 +void dsp_register_mem_cb(int (*req_cb)(void), void (*rel_cb)(void))
 +{
 +      mutex_lock(&cpustat.lock);
 +
 +      cpustat.mem_req_cb = req_cb;
 +      cpustat.mem_rel_cb = rel_cb;
 +
 +      /*
 +       * This function must be called while mem is enabled!
 +       */
 +      BUG_ON(cpustat.usecount.mem == 0);
 +
 +      mutex_unlock(&cpustat.lock);
 +}
 +
 +/* Detach both memory callbacks under cpustat.lock. */
 +void dsp_unregister_mem_cb(void)
 +{
 +      mutex_lock(&cpustat.lock);
 +      cpustat.mem_req_cb = NULL;
 +      cpustat.mem_rel_cb = NULL;
 +      mutex_unlock(&cpustat.lock);
 +}
 +#else
 +/* !CONFIG_ARCH_OMAP1: the mem callbacks have no effect — no-op stubs. */
 +void dsp_register_mem_cb(int (*req_cb)(void), void (*rel_cb)(void)) { }
 +void dsp_unregister_mem_cb(void) { }
 +#endif /* CONFIG_ARCH_OMAP1 */
 +
 +arch_initcall(omap_dsp_init);
 +
 +#ifdef CONFIG_ARCH_OMAP1
 +EXPORT_SYMBOL(omap_dsp_request_mpui);
 +EXPORT_SYMBOL(omap_dsp_release_mpui);
 +EXPORT_SYMBOL(omap_dsp_request_mem);
 +EXPORT_SYMBOL(omap_dsp_release_mem);
 +#endif /* CONFIG_ARCH_OMAP1 */
 +
 +#ifdef CONFIG_OMAP_DSP_MODULE
 +#if defined(CONFIG_ARCH_OMAP1)
 +EXPORT_SYMBOL(dsp_ck_handle);
 +EXPORT_SYMBOL(api_ck_handle);
 +#elif defined(CONFIG_ARCH_OMAP2)
 +EXPORT_SYMBOL(dsp_fck_handle);
 +EXPORT_SYMBOL(dsp_ick_handle);
 +#endif
 +EXPORT_SYMBOL(omap_dsp);
 +EXPORT_SYMBOL(dspmem_base);
 +EXPORT_SYMBOL(dspmem_size);
 +EXPORT_SYMBOL(daram_base);
 +EXPORT_SYMBOL(daram_size);
 +EXPORT_SYMBOL(saram_base);
 +EXPORT_SYMBOL(saram_size);
 +EXPORT_SYMBOL(dsp_set_rstvect);
 +EXPORT_SYMBOL(dsp_get_rstvect);
 +#ifdef CONFIG_ARCH_OMAP1
 +EXPORT_SYMBOL(dsp_set_idle_boot_base);
 +EXPORT_SYMBOL(dsp_reset_idle_boot_base);
 +#endif /* CONFIG_ARCH_OMAP1 */
 +EXPORT_SYMBOL(dsp_cpustat_request);
 +EXPORT_SYMBOL(dsp_cpustat_get_stat);
 +EXPORT_SYMBOL(dsp_cpustat_get_icrmask);
 +EXPORT_SYMBOL(dsp_cpustat_set_icrmask);
 +EXPORT_SYMBOL(dsp_register_mem_cb);
 +EXPORT_SYMBOL(dsp_unregister_mem_cb);
 +
 +EXPORT_SYMBOL(__cpu_flush_kern_tlb_range);
 +EXPORT_SYMBOL(cpu_architecture);
 +EXPORT_SYMBOL(pmd_clear_bad);
 +#endif
index 30c646e,0000000..01dfedd
mode 100644,000000..100644
--- /dev/null
@@@ -1,1562 -1,0 +1,1563 @@@
 +/*
 + * linux/arch/arm/plat-omap/mmu.c
 + *
 + * OMAP MMU management framework
 + *
 + * Copyright (C) 2002-2006 Nokia Corporation
 + *
 + * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
 + *        and Paul Mundt <lethal@linux-sh.org>
 + *
 + * TWL support: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation; either version 2 of the License, or
 + * (at your option) any later version.
 + *
 + * This program is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 + */
 +#include <linux/module.h>
 +#include <linux/mempool.h>
 +#include <linux/init.h>
 +#include <linux/delay.h>
++#include <linux/err.h>
 +#include <linux/clk.h>
 +#include <linux/device.h>
 +#include <linux/interrupt.h>
 +#include <asm/uaccess.h>
 +#include <asm/io.h>
 +#include <asm/pgalloc.h>
 +#include <asm/pgtable.h>
 +#include <asm/arch/mmu.h>
 +#include <asm/sizes.h>
 +
 +#if defined(CONFIG_ARCH_OMAP1)
 +#include "../mach-omap1/mmu.h"
 +#elif defined(CONFIG_ARCH_OMAP2)
 +#include "../mach-omap2/mmu.h"
 +#endif
 +
 +/*
 + * On OMAP2 MMU_LOCK_xxx_MASK only applies to the IVA and DSP, the camera
 + * MMU has base and victim implemented in different bits in the LOCK
 + * register (shifts are still the same), all of the other registers are
 + * the same on all of the MMUs..
 + */
 +#define MMU_LOCK_BASE_SHIFT           10
 +#define MMU_LOCK_VICTIM_SHIFT         4
 +
 +#define CAMERA_MMU_LOCK_BASE_MASK     (0x7 << MMU_LOCK_BASE_SHIFT)
 +#define CAMERA_MMU_LOCK_VICTIM_MASK   (0x7 << MMU_LOCK_VICTIM_SHIFT)
 +
 +#define is_aligned(adr,align) (!((adr)&((align)-1)))
 +#define ORDER_1MB     (20 - PAGE_SHIFT)
 +#define ORDER_64KB    (16 - PAGE_SHIFT)
 +#define ORDER_4KB     (12 - PAGE_SHIFT)
 +
 +#define MMU_CNTL_EMUTLBUPDATE (1<<3)
 +#define MMU_CNTL_TWLENABLE    (1<<2)
 +#define MMU_CNTL_MMUENABLE    (1<<1)
 +
 +static mempool_t *mempool_1M;
 +static mempool_t *mempool_64K;
 +
 +#define omap_mmu_for_each_tlb_entry(mmu, entry)                       \
 +      for (entry = mmu->exmap_tbl; prefetch(entry + 1),       \
 +           entry < (mmu->exmap_tbl + mmu->nr_tlb_entries);    \
 +           entry++)
 +
 +#define to_dev(obj)   container_of(obj, struct device, kobj)
 +
 +/*
 + * Try to take an element straight from the pool's pre-allocated reserve;
 + * fall back to mempool_alloc() (which may allocate from the system) only
 + * when the reserve is empty.
 + *
 + * NOTE(review): this reaches into mempool internals (pool->lock,
 + * pool->curr_nr, pool->elements) rather than using the public API —
 + * fragile against mempool implementation changes; verify on upgrade.
 + */
 +static void *mempool_alloc_from_pool(mempool_t *pool,
 +                                   unsigned int __nocast gfp_mask)
 +{
 +      spin_lock_irq(&pool->lock);
 +      if (likely(pool->curr_nr)) {
 +              void *element = pool->elements[--pool->curr_nr];
 +              spin_unlock_irq(&pool->lock);
 +              return element;
 +      }
 +
 +      spin_unlock_irq(&pool->lock);
 +      return mempool_alloc(pool, gfp_mask);
 +}
 +
 +/*
 + * kmem_reserve(), kmem_release():
 + * reserve or release kernel memory for exmap().
 + *
 + * exmap() might request consecutive 1MB or 64kB,
 + * but it will be difficult after memory pages are fragmented.
 + * So, user can reserve such memory blocks in the early phase
 + * through kmem_reserve().
 + */
 +/* mempool allocator: 'order' (smuggled through the void *pool_data)
 + * selects the DMA-capable page-block size. */
 +static void *omap_mmu_pool_alloc(unsigned int __nocast gfp, void *order)
 +{
 +      return (void *)__get_dma_pages(gfp, (unsigned int)order);
 +}
 +
 +/* mempool free callback: counterpart of omap_mmu_pool_alloc(). */
 +static void omap_mmu_pool_free(void *buf, void *order)
 +{
 +      free_pages((unsigned long)buf, (unsigned int)order);
 +}
 +
 +/*
 + * Pre-reserve DMA-capable 1MB and 64KB blocks for later exmap() use,
 + * before memory fragments.  'size' must be a multiple of 64KB and fit
 + * the MMU address space; the 1MB-granular part goes to mempool_1M, the
 + * 64KB-granular remainder to mempool_64K.  Returns the number of bytes
 + * actually reserved (size minus the sub-64KB tail), or -EINVAL.
 + */
 +int omap_mmu_kmem_reserve(struct omap_mmu *mmu, unsigned long size)
 +{
 +      unsigned long len = size;
 +
 +      /* alignment check */
 +      if (!is_aligned(size, SZ_64K)) {
 +              printk(KERN_ERR
 +                     "omapdsp: size(0x%lx) is not multiple of 64KB.\n", size);
 +              return -EINVAL;
 +      }
 +
 +      if (size > (1 << mmu->addrspace)) {
 +              printk(KERN_ERR
 +                     "omapdsp: size(0x%lx) is larger than DSP memory space "
 +                     "size (0x%x.\n", size, (1 << mmu->addrspace));
 +              return -EINVAL;
 +      }
 +
 +      if (size >= SZ_1M) {
 +              int nr = size >> 20;
 +
 +              if (likely(!mempool_1M))
 +                      mempool_1M = mempool_create(nr, omap_mmu_pool_alloc,
 +                                                  omap_mmu_pool_free,
 +                                                  (void *)ORDER_1MB);
 +              else
 +                      mempool_resize(mempool_1M, mempool_1M->min_nr + nr,
 +                                     GFP_KERNEL);
 +
 +              /* NOTE(review): ~(0xf << 20) clears only bits 20-23; for
 +               * sizes >= 16MB higher MB bits would survive — confirm the
 +               * addrspace check above keeps sizes below that. */
 +              size &= ~(0xf << 20);
 +      }
 +
 +      if (size >= SZ_64K) {
 +              int nr = size >> 16;
 +
 +              if (likely(!mempool_64K))
 +                      mempool_64K = mempool_create(nr, omap_mmu_pool_alloc,
 +                                                   omap_mmu_pool_free,
 +                                                   (void *)ORDER_64KB);
 +              else
 +                      mempool_resize(mempool_64K, mempool_64K->min_nr + nr,
 +                                     GFP_KERNEL);
 +
 +              size &= ~(0xf << 16);
 +      }
 +
 +      if (size)
 +              len -= size;
 +
 +      return len;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_kmem_reserve);
 +
 +/* Destroy both reserve pools (if created) and mark them gone. */
 +void omap_mmu_kmem_release(void)
 +{
 +      if (mempool_64K) {
 +              mempool_destroy(mempool_64K);
 +              mempool_64K = NULL;
 +      }
 +
 +      if (mempool_1M) {
 +              mempool_destroy(mempool_1M);
 +              mempool_1M = NULL;
 +      }
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_kmem_release);
 +
 +/*
 + * Release an exmap buffer: clear the PG_reserved marks set at map time
 + * (needed for mmap), then return the block to the matching mempool when
 + * one exists for its order, otherwise straight to the page allocator.
 + */
 +static void omap_mmu_free_pages(unsigned long buf, unsigned int order)
 +{
 +      struct page *page, *ps, *pe;
 +
 +      ps = virt_to_page(buf);
 +      pe = virt_to_page(buf + (1 << (PAGE_SHIFT + order)));
 +
 +      for (page = ps; page < pe; page++)
 +              ClearPageReserved(page);
 +
 +      if ((order == ORDER_64KB) && likely(mempool_64K))
 +              mempool_free((void *)buf, mempool_64K);
 +      else if ((order == ORDER_1MB) && likely(mempool_1M))
 +              mempool_free((void *)buf, mempool_1M);
 +      else
 +              free_pages(buf, order);
 +}
 +
 +/*
 + * ARM MMU operations
 + */
 +/*
 + * Map [virt, virt+size) to [phys, phys+size) in the ARM (init_mm)
 + * page tables, page by page, allocating the PTE table on demand.
 + * 'size' must be a whole number of pages (BUG() on a remainder).
 + * Returns 0 or -ENOMEM.
 + */
 +int exmap_set_armmmu(unsigned long virt, unsigned long phys, unsigned long size)
 +{
 +      long off;
 +      unsigned long sz_left;
 +      pmd_t *pmdp;
 +      pte_t *ptep;
 +      int prot_pmd, prot_pte;
 +
 +      printk(KERN_DEBUG
 +             "MMU: mapping in ARM MMU, v=0x%08lx, p=0x%08lx, sz=0x%lx\n",
 +             virt, phys, size);
 +
 +      prot_pmd = PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_IO);
 +      prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE;
 +
 +      /* NOTE(review): pmdp is looked up once before the loop — assumes
 +       * the whole range lives under a single pmd; confirm callers never
 +       * pass a range crossing a pgd boundary. */
 +      pmdp = pmd_offset(pgd_offset_k(virt), virt);
 +      if (pmd_none(*pmdp)) {
 +              ptep = pte_alloc_one_kernel(&init_mm, 0);
 +              if (ptep == NULL)
 +                      return -ENOMEM;
 +              /* note: two PMDs will be set  */
 +              pmd_populate_kernel(&init_mm, pmdp, ptep);
 +      }
 +
 +      off = phys - virt;
 +      for (sz_left = size;
 +           sz_left >= PAGE_SIZE;
 +           sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
 +              ptep = pte_offset_kernel(pmdp, virt);
 +              set_pte_ext(ptep, __pte((virt + off) | prot_pte), 0);
 +      }
 +      if (sz_left)
 +              BUG();
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(exmap_set_armmmu);
 +
 +/*
 + * Undo exmap_set_armmmu(): clear the kernel PTEs covering
 + * [virt, virt+size), one page at a time.  'size' must be a whole
 + * number of pages (BUG() on a remainder).
 + */
 +void exmap_clear_armmmu(unsigned long virt, unsigned long size)
 +{
 +      unsigned long sz_left;
 +      pmd_t *pmdp;
 +      pte_t *ptep;
 +
 +      printk(KERN_DEBUG
 +             "MMU: unmapping in ARM MMU, v=0x%08lx, sz=0x%lx\n",
 +             virt, size);
 +
 +      for (sz_left = size;
 +           sz_left >= PAGE_SIZE;
 +           sz_left -= PAGE_SIZE, virt += PAGE_SIZE) {
 +              pmdp = pmd_offset(pgd_offset_k(virt), virt);
 +              ptep = pte_offset_kernel(pmdp, virt);
 +              pte_clear(&init_mm, virt, ptep);
 +      }
 +      if (sz_left)
 +              BUG();
 +}
 +EXPORT_SYMBOL_GPL(exmap_clear_armmmu);
 +
 +/*
 + * Return 1 when the whole range [vadr, vadr+len) is covered by valid
 + * exmap entries (possibly several, stitched together by restarting the
 + * scan on the remaining tail), 0 otherwise.
 + */
 +int exmap_valid(struct omap_mmu *mmu, void *vadr, size_t len)
 +{
 +      /* exmap_sem should be held before calling this function */
 +      struct exmap_tbl *ent;
 +
 +start:
 +      omap_mmu_for_each_tlb_entry(mmu, ent) {
 +              void *mapadr;
 +              unsigned long mapsize;
 +
 +              if (!ent->valid)
 +                      continue;
 +              mapadr = (void *)ent->vadr;
 +              mapsize = 1 << (ent->order + PAGE_SHIFT);
 +              if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
 +                      if (vadr + len <= mapadr + mapsize) {
 +                              /* this map covers whole address. */
 +                              return 1;
 +                      } else {
 +                              /*
 +                               * this map covers partially.
 +                               * check rest portion.
 +                               */
 +                              len -= mapadr + mapsize - vadr;
 +                              vadr = mapadr + mapsize;
 +                              goto start;
 +                      }
 +              }
 +      }
 +
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(exmap_valid);
 +
 +/*
 + * omap_mmu_exmap_use(), unuse():
 + * when the mapped area is exported to user space with mmap,
 + * the usecount is incremented.
 + * while the usecount > 0, that area can't be released.
 + */
 +/*
 + * Bump the usecount of every valid exmap entry overlapping
 + * [vadr, vadr+len); entries with usecount > 0 must not be released.
 + */
 +void omap_mmu_exmap_use(struct omap_mmu *mmu, void *vadr, size_t len)
 +{
 +      struct exmap_tbl *ent;
 +
 +      down_write(&mmu->exmap_sem);
 +      omap_mmu_for_each_tlb_entry(mmu, ent) {
 +              void *mapadr;
 +              unsigned long mapsize;
 +
 +              if (!ent->valid)
 +                      continue;
 +              mapadr = (void *)ent->vadr;
 +              mapsize = 1 << (ent->order + PAGE_SHIFT);
 +              if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
 +                      ent->usecount++;
 +      }
 +      up_write(&mmu->exmap_sem);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_exmap_use);
 +
 +/*
 + * Counterpart of omap_mmu_exmap_use(): drop the usecount of every
 + * valid exmap entry overlapping [vadr, vadr+len).
 + */
 +void omap_mmu_exmap_unuse(struct omap_mmu *mmu, void *vadr, size_t len)
 +{
 +      struct exmap_tbl *ent;
 +
 +      down_write(&mmu->exmap_sem);
 +      omap_mmu_for_each_tlb_entry(mmu, ent) {
 +              void *mapadr;
 +              unsigned long mapsize;
 +
 +              if (!ent->valid)
 +                      continue;
 +              mapadr = (void *)ent->vadr;
 +              mapsize = 1 << (ent->order + PAGE_SHIFT);
 +              if ((vadr + len > mapadr) && (vadr < mapadr + mapsize))
 +                      ent->usecount--;
 +      }
 +      up_write(&mmu->exmap_sem);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_exmap_unuse);
 +
 +/*
 + * omap_mmu_virt_to_phys()
 + * returns physical address, and sets len to valid length
 + */
 +/*
 + * omap_mmu_virt_to_phys()
 + * returns physical address, and sets len to valid length
 + *
 + * Internal DSP memory is identity-addressed; external (exmap) memory is
 + * resolved through the exmap table.  Returns 0 when no mapping covers
 + * vadr (0 doubles as the "not found" sentinel).
 + */
 +unsigned long
 +omap_mmu_virt_to_phys(struct omap_mmu *mmu, void *vadr, size_t *len)
 +{
 +      struct exmap_tbl *ent;
 +
 +      if (omap_mmu_internal_memory(mmu, vadr)) {
 +              unsigned long addr = (unsigned long)vadr;
 +              *len = mmu->membase + mmu->memsize - addr;
 +              return addr;
 +      }
 +
 +      /* EXRAM */
 +      omap_mmu_for_each_tlb_entry(mmu, ent) {
 +              void *mapadr;
 +              unsigned long mapsize;
 +
 +              if (!ent->valid)
 +                      continue;
 +              mapadr = (void *)ent->vadr;
 +              mapsize = 1 << (ent->order + PAGE_SHIFT);
 +              if ((vadr >= mapadr) && (vadr < mapadr + mapsize)) {
 +                      *len = mapadr + mapsize - vadr;
 +                      return __pa(ent->buf) + vadr - mapadr;
 +              }
 +      }
 +
 +      /* valid mapping not found */
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_virt_to_phys);
 +
 +/*
 + * PTE operations
 + */
 +/*
 + * Write one 1MB section descriptor for 'virt' into mm's tables.
 + * Each pgd slot holds two section words; addresses in the upper half
 + * of the slot must use the second word, hence the pmdp++.
 + */
 +static inline void
 +omap_mmu_alloc_section(struct mm_struct *mm, unsigned long virt,
 +                     unsigned long phys, int prot)
 +{
 +      pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
 +      if (virt & (1 << SECTION_SHIFT))
 +              pmdp++;
 +      *pmdp = __pmd((phys & SECTION_MASK) | prot | PMD_TYPE_SECT);
 +      flush_pmd_entry(pmdp);
 +}
 +
 +/*
 + * Map a 16MB supersection as 16 consecutive section entries, all
 + * carrying PMD_SECT_SUPER and the same physical base.
 + */
 +static inline void
 +omap_mmu_alloc_supersection(struct mm_struct *mm, unsigned long virt,
 +                          unsigned long phys, int prot)
 +{
 +      int i;
 +      for (i = 0; i < 16; i += 1) {
 +              omap_mmu_alloc_section(mm, virt, phys, prot | PMD_SECT_SUPER);
 +              virt += (PGDIR_SIZE / 2);
 +      }
 +}
 +
 +/*
 + * Install one small-page PTE for 'virt' -> 'phys' in mm, allocating the
 + * PTE table on demand.  Returns 0 or -ENOMEM.
 + */
 +static inline int
 +omap_mmu_alloc_page(struct mm_struct *mm, unsigned long virt,
 +                  unsigned long phys, pgprot_t prot)
 +{
 +      pte_t *ptep;
 +      pmd_t *pmdp = pmd_offset(pgd_offset(mm, virt), virt);
 +
 +      if (!(prot & PTE_TYPE_MASK))
 +              prot |= PTE_TYPE_SMALL;
 +
 +      if (pmd_none(*pmdp)) {
 +              ptep = pte_alloc_one_kernel(mm, virt);
 +              if (ptep == NULL)
 +                      return -ENOMEM;
 +              pmd_populate_kernel(mm, pmdp, ptep);
 +      }
 +      ptep = pte_offset_kernel(pmdp, virt);
 +      /* NOTE(review): steps back PTRS_PER_PTE entries — presumably to
 +       * reach the ARM hardware PTE table preceding the Linux-view table
 +       * in this kernel's pte layout; confirm against pgtable.h. */
 +      ptep -= PTRS_PER_PTE;
 +      *ptep = pfn_pte(phys >> PAGE_SHIFT, prot);
 +      flush_pmd_entry((pmd_t *)ptep);
 +      return 0;
 +}
 +
 +/*
 + * Map a 64KB large page as 16 consecutive 4KB PTEs tagged
 + * PTE_TYPE_LARGE.  A failure can only occur on the first allocation,
 + * before any entry has been written, so no rollback is needed.
 + */
 +static inline int
 +omap_mmu_alloc_largepage(struct mm_struct *mm, unsigned long virt,
 +                       unsigned long phys, pgprot_t prot)
 +{
 +      int i, ret;
 +      for (i = 0; i < 16; i += 1) {
 +              ret = omap_mmu_alloc_page(mm, virt, phys,
 +                                        prot | PTE_TYPE_LARGE);
 +              if (ret)
 +                      return -ENOMEM; /* only 1st time */
 +              virt += PAGE_SIZE;
 +      }
 +      return 0;
 +}
 +
 +/*
 + * Mirror a TLB entry into the software page table (twl_mm), dispatching
 + * on the entry's page size.  Page-table writes are serialized with
 + * mm->page_table_lock.  Returns 0 or a negative errno from the 64KB/4KB
 + * paths; the section paths cannot fail.
 + */
 +static int omap_mmu_load_pte(struct omap_mmu *mmu,
 +                           struct omap_mmu_tlb_entry *e)
 +{
 +      int ret = 0;
 +      struct mm_struct *mm = mmu->twl_mm;
 +      const unsigned long va = e->va;
 +      const unsigned long pa = e->pa;
 +      const pgprot_t prot = mmu->ops->pte_get_attr(e);
 +
 +      spin_lock(&mm->page_table_lock);
 +
 +      switch (e->pgsz) {
 +      case OMAP_MMU_CAM_PAGESIZE_16MB:
 +              omap_mmu_alloc_supersection(mm, va, pa, prot);
 +              break;
 +      case OMAP_MMU_CAM_PAGESIZE_1MB:
 +              omap_mmu_alloc_section(mm, va, pa, prot);
 +              break;
 +      case OMAP_MMU_CAM_PAGESIZE_64KB:
 +              ret = omap_mmu_alloc_largepage(mm, va, pa, prot);
 +              break;
 +      case OMAP_MMU_CAM_PAGESIZE_4KB:
 +              ret = omap_mmu_alloc_page(mm, va, pa, prot);
 +              break;
 +      default:
 +              BUG();
 +              break;
 +      }
 +
 +      spin_unlock(&mm->page_table_lock);
 +
 +      return ret;
 +}
 +
 +/*
 + * Remove the software-page-table mapping for 'virt': clear its PTE and,
 + * if that leaves the whole PTE table empty, free the table and clear the
 + * pmd.  A section/supersection mapping (non-table pmd) is cleared at the
 + * pmd level directly.
 + */
 +static void omap_mmu_clear_pte(struct omap_mmu *mmu, unsigned long virt)
 +{
 +      pte_t *ptep, *end;
 +      pmd_t *pmdp;
 +      struct mm_struct *mm = mmu->twl_mm;
 +
 +      spin_lock(&mm->page_table_lock);
 +
 +      pmdp = pmd_offset(pgd_offset(mm, virt), virt);
 +
 +      if (pmd_none(*pmdp))
 +              goto out;
 +
 +      if (!pmd_table(*pmdp))
 +              goto invalidate_pmd;
 +
 +      ptep = pte_offset_kernel(pmdp, virt);
 +      pte_clear(mm, virt, ptep);
 +      flush_pmd_entry((pmd_t *)ptep);
 +
 +      /* zap pte */
 +      end = pmd_page_vaddr(*pmdp);
 +      ptep = end - PTRS_PER_PTE;
 +      while (ptep < end) {
 +              if (!pte_none(*ptep))
 +                      goto out;       /* table still in use — keep it */
 +              ptep++;
 +      }
 +      pte_free_kernel(pmd_page_vaddr(*pmdp));
 +
 + invalidate_pmd:
 +      pmd_clear(pmdp);
 +      flush_pmd_entry(pmdp);
 + out:
 +      spin_unlock(&mm->page_table_lock);
 +}
 +
 +/*
 + * TLB operations
 + */
 +/* Delegate CAM/RAM register-set construction to the per-variant ops. */
 +static struct cam_ram_regset *
 +omap_mmu_cam_ram_alloc(struct omap_mmu *mmu, struct omap_mmu_tlb_entry *entry)
 +{
 +      return mmu->ops->cam_ram_alloc(entry);
 +}
 +
 +/* Ask the per-variant ops whether this CAM/RAM register set is valid. */
 +static int omap_mmu_cam_ram_valid(struct omap_mmu *mmu,
 +                                struct cam_ram_regset *cr)
 +{
 +      return mmu->ops->cam_ram_valid(cr);
 +}
 +
 +/*
 + * Read base/victim from the MMU_LOCK register.  The camera MMU keeps
 + * them in narrower fields (3 bits) than the DSP/IVA MMUs, so the mask
 + * is chosen per MMU type; the shifts are the same on all variants.
 + */
 +static inline void
 +omap_mmu_get_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *tlb_lock)
 +{
 +      unsigned long lock = omap_mmu_read_reg(mmu, MMU_LOCK);
 +      int mask;
 +
 +      mask = (mmu->type == OMAP_MMU_CAMERA) ?
 +                      CAMERA_MMU_LOCK_BASE_MASK : MMU_LOCK_BASE_MASK;
 +      tlb_lock->base = (lock & mask) >> MMU_LOCK_BASE_SHIFT;
 +
 +      mask = (mmu->type == OMAP_MMU_CAMERA) ?
 +                      CAMERA_MMU_LOCK_VICTIM_MASK : MMU_LOCK_VICTIM_MASK;
 +      tlb_lock->victim = (lock & mask) >> MMU_LOCK_VICTIM_SHIFT;
 +}
 +
 +/* Write base/victim back into the MMU_LOCK register. */
 +static inline void
 +omap_mmu_set_tlb_lock(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock)
 +{
 +      omap_mmu_write_reg(mmu,
 +                         (lock->base << MMU_LOCK_BASE_SHIFT) |
 +                         (lock->victim << MMU_LOCK_VICTIM_SHIFT), MMU_LOCK);
 +}
 +
 +/* Flush the TLB entry currently selected by the victim pointer. */
 +static inline void omap_mmu_flush(struct omap_mmu *mmu)
 +{
 +      omap_mmu_write_reg(mmu, 0x1, MMU_FLUSH_ENTRY);
 +}
 +
 +/* Load the staged CAM/RAM registers into the TLB at the victim slot. */
 +static inline void omap_mmu_ldtlb(struct omap_mmu *mmu)
 +{
 +      omap_mmu_write_reg(mmu, 0x1, MMU_LD_TLB);
 +}
 +
 +/*
 + * Read the TLB entry selected by 'lock' (base/victim) into 'cr',
 + * via the per-variant read_tlb op when one is provided.
 + */
 +void omap_mmu_read_tlb(struct omap_mmu *mmu, struct omap_mmu_tlb_lock *lock,
 +                     struct cam_ram_regset *cr)
 +{
 +      /* set victim */
 +      omap_mmu_set_tlb_lock(mmu, lock);
 +
 +      if (likely(mmu->ops->read_tlb))
 +              mmu->ops->read_tlb(mmu, cr);
 +}
 +
 +/*
 + * Stage 'cr' into the CAM/RAM registers (per-variant load_tlb op),
 + * flush any stale entry at the victim slot, then latch the new one.
 + */
 +void omap_mmu_load_tlb(struct omap_mmu *mmu, struct cam_ram_regset *cr)
 +{
 +      if (likely(mmu->ops->load_tlb))
 +              mmu->ops->load_tlb(mmu, cr);
 +
 +      /* flush the entry */
 +      omap_mmu_flush(mmu);
 +
 +      /* load a TLB entry */
 +      omap_mmu_ldtlb(mmu);
 +}
 +
 +int omap_mmu_load_tlb_entry(struct omap_mmu *mmu,
 +                          struct omap_mmu_tlb_entry *entry)
 +{
 +      struct omap_mmu_tlb_lock lock;
 +      struct cam_ram_regset *cr;
 +
 +      clk_enable(mmu->clk);
 +      omap_dsp_request_mem();
 +
 +      omap_mmu_get_tlb_lock(mmu, &lock);
 +      for (lock.victim = 0; lock.victim < lock.base; lock.victim++) {
 +              struct cam_ram_regset tmp;
 +
 +              /* read a TLB entry */
 +              omap_mmu_read_tlb(mmu, &lock, &tmp);
 +              if (!omap_mmu_cam_ram_valid(mmu, &tmp))
 +                      goto found_victim;
 +      }
 +      omap_mmu_set_tlb_lock(mmu, &lock);
 +
 +found_victim:
 +      /* The last entry cannot be locked? */
 +      if (lock.victim == (mmu->nr_tlb_entries - 1)) {
 +              printk(KERN_ERR "MMU: TLB is full.\n");
 +              return -EBUSY;
 +      }
 +
 +      cr = omap_mmu_cam_ram_alloc(mmu, entry);
 +      if (IS_ERR(cr))
 +              return PTR_ERR(cr);
 +
 +      omap_mmu_load_tlb(mmu, cr);
 +      kfree(cr);
 +
 +      /* update lock base */
 +      if (lock.victim == lock.base)
 +              lock.base++;
 +
 +      omap_mmu_set_tlb_lock(mmu, &lock);
 +
 +      omap_dsp_release_mem();
 +      clk_disable(mmu->clk);
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_load_tlb_entry);
 +
 +/* Extract the virtual address from a CAM register set (variant op). */
 +static inline unsigned long
 +omap_mmu_cam_va(struct omap_mmu *mmu, struct cam_ram_regset *cr)
 +{
 +      return mmu->ops->cam_va(cr);
 +}
 +
 +/*
 + * Flush every locked TLB entry whose virtual address equals 'vadr',
 + * then shrink the lock base to just past the highest surviving valid
 + * entry.  Always returns 0.
 + */
 +int omap_mmu_clear_tlb_entry(struct omap_mmu *mmu, unsigned long vadr)
 +{
 +      struct omap_mmu_tlb_lock lock;
 +      int i;
 +      int max_valid = 0;
 +
 +      clk_enable(mmu->clk);
 +      omap_dsp_request_mem();
 +
 +      omap_mmu_get_tlb_lock(mmu, &lock);
 +      for (i = 0; i < lock.base; i++) {
 +              struct cam_ram_regset cr;
 +
 +              /* read a TLB entry */
 +              lock.victim = i;
 +              omap_mmu_read_tlb(mmu, &lock, &cr);
 +              if (!omap_mmu_cam_ram_valid(mmu, &cr))
 +                      continue;
 +
 +              if (omap_mmu_cam_va(mmu, &cr) == vadr)
 +                      /* flush the entry */
 +                      omap_mmu_flush(mmu);
 +              else
 +                      max_valid = i;
 +      }
 +
 +      /* set new lock base */
 +      lock.base = lock.victim = max_valid + 1;
 +      omap_mmu_set_tlb_lock(mmu, &lock);
 +
 +      omap_dsp_release_mem();
 +      clk_disable(mmu->clk);
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_clear_tlb_entry);
 +
 +/*
 + * Global TLB flush: wipe all non-preserved entries and reset the lock
 + * base/victim to just past the preserved region.
 + */
 +static void omap_mmu_gflush(struct omap_mmu *mmu)
 +{
 +      struct omap_mmu_tlb_lock lock;
 +
 +      clk_enable(mmu->clk);
 +      omap_dsp_request_mem();
 +
 +      omap_mmu_write_reg(mmu, 0x1, MMU_GFLUSH);
 +      lock.base = lock.victim = mmu->nr_exmap_preserved;
 +      omap_mmu_set_tlb_lock(mmu, &lock);
 +
 +      omap_dsp_release_mem();
 +      clk_disable(mmu->clk);
 +}
 +
 +/*
 + * Load an entry into the software page table (non-preserved entries,
 + * when the variant supports PTEs) and/or the hardware TLB (when
 + * entry->tlb is set).  Returns the TLB load result when attempted,
 + * the PTE error on PTE failure, and -1 if neither path ran.
 + */
 +int omap_mmu_load_pte_entry(struct omap_mmu *mmu,
 +                          struct omap_mmu_tlb_entry *entry)
 +{
 +      int ret = -1;
 +      if ((!entry->prsvd) && (mmu->ops->pte_get_attr)) {
 +              /*XXX use PG_flag for prsvd */
 +              ret = omap_mmu_load_pte(mmu, entry);
 +              if (ret)
 +                      return ret;
 +      }
 +      if (entry->tlb)
 +              ret = omap_mmu_load_tlb_entry(mmu, entry);
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_load_pte_entry);
 +
 +/*
 + * Remove a mapping from the hardware TLB first, then (when the variant
 + * keeps software PTEs) from the page table.  Returns the TLB result.
 + */
 +int omap_mmu_clear_pte_entry(struct omap_mmu *mmu, unsigned long vadr)
 +{
 +      int ret = omap_mmu_clear_tlb_entry(mmu, vadr);
 +      if (ret)
 +              return ret;
 +      if (mmu->ops->pte_get_attr)
 +              omap_mmu_clear_pte(mmu, vadr);
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_clear_pte_entry);
 +
 +/*
 + * omap_mmu_exmap()
 + *
 + * MEM_IOCTL_EXMAP ioctl calls this function with padr=0.
 + * In this case, the buffer for DSP is allocated in this routine,
 + * then it is mapped.
 + * On the other hand, for example - frame buffer sharing, calls
 + * this function with padr set. It means some known address space
 + * pointed with padr is going to be shared with DSP.
 + */
 +/*
 + * Map [dspadr, dspadr+size) into the DSP MMU, greedily choosing the
 + * largest unit (1MB / 64KB / 4KB) that fits the current alignment and
 + * remaining size, one exmap slot per unit, chained via link.prev/next.
 + * With padr == 0 and type EXMAP_TYPE_MEM the backing buffer is
 + * allocated here; otherwise the given physical range is shared.
 + * Returns 'size' on success or a negative errno (partial mappings are
 + * torn down via omap_mmu_exunmap on failure).
 + */
 +int omap_mmu_exmap(struct omap_mmu *mmu, unsigned long dspadr,
 +                 unsigned long padr, unsigned long size,
 +                 enum exmap_type type)
 +{
 +      unsigned long pgsz;
 +      void *buf;
 +      unsigned int order = 0;
 +      unsigned long unit;
 +      int prev = -1;
 +      unsigned long _dspadr = dspadr;
 +      unsigned long _padr = padr;
 +      void *_vadr = omap_mmu_to_virt(mmu, dspadr);
 +      unsigned long _size = size;
 +      struct omap_mmu_tlb_entry tlb_ent;
 +      struct exmap_tbl *exmap_ent, *tmp_ent;
 +      int status;
 +      int idx;
 +
 +#define MINIMUM_PAGESZ        SZ_4K
 +      /*
 +       * alignment check
 +       */
 +      if (!is_aligned(size, MINIMUM_PAGESZ)) {
 +              printk(KERN_ERR
 +                     "MMU: size(0x%lx) is not multiple of 4KB.\n", size);
 +              return -EINVAL;
 +      }
 +      if (!is_aligned(dspadr, MINIMUM_PAGESZ)) {
 +              printk(KERN_ERR
 +                     "MMU: DSP address(0x%lx) is not aligned.\n", dspadr);
 +              return -EINVAL;
 +      }
 +      if (!is_aligned(padr, MINIMUM_PAGESZ)) {
 +              printk(KERN_ERR
 +                     "MMU: physical address(0x%lx) is not aligned.\n",
 +                     padr);
 +              return -EINVAL;
 +      }
 +
 +      /* address validity check */
 +      if ((dspadr < mmu->memsize) ||
 +          (dspadr >= (1 << mmu->addrspace))) {
 +              printk(KERN_ERR
 +                     "MMU: illegal address/size for %s().\n",
 +                     __FUNCTION__);
 +              return -EINVAL;
 +      }
 +
 +      down_write(&mmu->exmap_sem);
 +
 +      /* overlap check */
 +      omap_mmu_for_each_tlb_entry(mmu, tmp_ent) {
 +              unsigned long mapsize;
 +
 +              if (!tmp_ent->valid)
 +                      continue;
 +              mapsize = 1 << (tmp_ent->order + PAGE_SHIFT);
 +              if ((_vadr + size > tmp_ent->vadr) &&
 +                  (_vadr < tmp_ent->vadr + mapsize)) {
 +                      printk(KERN_ERR "MMU: exmap page overlap!\n");
 +                      up_write(&mmu->exmap_sem);
 +                      return -EINVAL;
 +              }
 +      }
 +
 +start:
 +      /* one iteration per mapping unit; _dspadr/_vadr/_padr/_size track
 +       * the yet-unmapped tail of the request */
 +      buf = NULL;
 +      /* Are there any free TLB lines?  */
 +      for (idx = 0; idx < mmu->nr_tlb_entries; idx++)
 +              if (!mmu->exmap_tbl[idx].valid)
 +                      goto found_free;
 +
 +      printk(KERN_ERR "MMU: DSP TLB is full.\n");
 +      status = -EBUSY;
 +      goto fail;
 +
 +found_free:
 +      exmap_ent = mmu->exmap_tbl + idx;
 +
 +      /* pick the largest unit both addresses (and the size) allow;
 +       * padr == 0 means we allocate, so only dspadr constrains it */
 +      if ((_size >= SZ_1M) &&
 +          (is_aligned(_padr, SZ_1M) || (padr == 0)) &&
 +          is_aligned(_dspadr, SZ_1M)) {
 +              unit = SZ_1M;
 +              pgsz = OMAP_MMU_CAM_PAGESIZE_1MB;
 +      } else if ((_size >= SZ_64K) &&
 +                 (is_aligned(_padr, SZ_64K) || (padr == 0)) &&
 +                 is_aligned(_dspadr, SZ_64K)) {
 +              unit = SZ_64K;
 +              pgsz = OMAP_MMU_CAM_PAGESIZE_64KB;
 +      } else {
 +              unit = SZ_4K;
 +              pgsz = OMAP_MMU_CAM_PAGESIZE_4KB;
 +      }
 +
 +      order = get_order(unit);
 +
 +      /* buffer allocation */
 +      if (type == EXMAP_TYPE_MEM) {
 +              struct page *page, *ps, *pe;
 +
 +              if ((order == ORDER_1MB) && likely(mempool_1M))
 +                      buf = mempool_alloc_from_pool(mempool_1M, GFP_KERNEL);
 +              else if ((order == ORDER_64KB) && likely(mempool_64K))
 +                      buf = mempool_alloc_from_pool(mempool_64K, GFP_KERNEL);
 +              else {
 +                      buf = (void *)__get_dma_pages(GFP_KERNEL, order);
 +                      if (buf == NULL) {
 +                              status = -ENOMEM;
 +                              goto fail;
 +                      }
 +              }
 +
 +              /* mark the pages as reserved; this is needed for mmap */
 +              ps = virt_to_page(buf);
 +              pe = virt_to_page(buf + unit);
 +
 +              for (page = ps; page < pe; page++)
 +                      SetPageReserved(page);
 +
 +              _padr = __pa(buf);
 +      }
 +
 +      /*
 +       * mapping for ARM MMU:
 +       * we should not access to the allocated memory through 'buf'
 +       * since this area should not be cached.
 +       */
 +      status = exmap_set_armmmu((unsigned long)_vadr, _padr, unit);
 +      if (status < 0)
 +              goto fail;
 +
 +      /* loading DSP PTE entry */
 +      INIT_TLB_ENTRY(&tlb_ent, _dspadr, _padr, pgsz);
 +      status = omap_mmu_load_pte_entry(mmu, &tlb_ent);
 +      if (status < 0) {
 +              exmap_clear_armmmu((unsigned long)_vadr, unit);
 +              goto fail;
 +      }
 +
 +      INIT_EXMAP_TBL_ENTRY(exmap_ent, buf, _vadr, type, order);
 +      exmap_ent->link.prev = prev;
 +      if (prev >= 0)
 +              mmu->exmap_tbl[prev].link.next = idx;
 +
 +      if ((_size -= unit) == 0) {     /* normal completion */
 +              up_write(&mmu->exmap_sem);
 +              return size;
 +      }
 +
 +      _dspadr += unit;
 +      _vadr   += unit;
 +      _padr = padr ? _padr + unit : 0;
 +      prev = idx;
 +      goto start;
 +
 +fail:
 +      up_write(&mmu->exmap_sem);
 +      if (buf)
 +              omap_mmu_free_pages((unsigned long)buf, order);
 +      omap_mmu_exunmap(mmu, dspadr);
 +      return status;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_exmap);
 +
 +static unsigned long unmap_free_arm(struct exmap_tbl *ent)
 +{
 +      unsigned long size;
 +
 +      /* clearing ARM MMU */
 +      size = 1 << (ent->order + PAGE_SHIFT);
 +      exmap_clear_armmmu((unsigned long)ent->vadr, size);
 +
 +      /* freeing allocated memory */
 +      if (ent->type == EXMAP_TYPE_MEM) {
 +              omap_mmu_free_pages((unsigned long)ent->buf, ent->order);
 +              printk(KERN_DEBUG
 +                     "MMU: freeing 0x%lx bytes @ adr 0x%8p\n",
 +                     size, ent->buf);
 +      }
 +
 +      ent->valid = 0;
 +      return size;
 +}
 +
 +int omap_mmu_exunmap(struct omap_mmu *mmu, unsigned long dspadr)
 +{
 +      void *vadr;
 +      unsigned long size;
 +      int total = 0;
 +      struct exmap_tbl *ent;
 +      int idx;
 +
 +      vadr = omap_mmu_to_virt(mmu, dspadr);
 +      down_write(&mmu->exmap_sem);
 +      for (idx = 0; idx < mmu->nr_tlb_entries; idx++) {
 +              ent = mmu->exmap_tbl + idx;
 +              if (!ent->valid || ent->prsvd)
 +                      continue;
 +              if (ent->vadr == vadr)
 +                      goto found_map;
 +      }
 +      up_write(&mmu->exmap_sem);
 +      printk(KERN_WARNING
 +             "MMU: address %06lx not found in exmap_tbl.\n", dspadr);
 +      return -EINVAL;
 +
 +found_map:
 +      if (ent->usecount > 0) {
 +              printk(KERN_ERR
 +                     "MMU: exmap reference count is not 0.\n"
 +                     "   idx=%d, vadr=%p, order=%d, usecount=%d\n",
 +                     idx, ent->vadr, ent->order, ent->usecount);
 +              up_write(&mmu->exmap_sem);
 +              return -EINVAL;
 +      }
 +      /* clearing DSP PTE entry */
 +      omap_mmu_clear_pte_entry(mmu, dspadr);
 +
 +      /* clear ARM MMU and free buffer */
 +      size = unmap_free_arm(ent);
 +      total += size;
 +
 +      /* we don't free PTEs */
 +
 +      /* flush TLB */
 +      flush_tlb_kernel_range((unsigned long)vadr, (unsigned long)vadr + size);
 +
 +      /* check if next mapping is in same group */
 +      idx = ent->link.next;
 +      if (idx < 0)
 +              goto up_out;    /* normal completion */
 +      ent = mmu->exmap_tbl + idx;
 +      dspadr += size;
 +      vadr   += size;
 +      if (ent->vadr == vadr)
 +              goto found_map; /* continue */
 +
 +      printk(KERN_ERR
 +             "MMU: illegal exmap_tbl grouping!\n"
 +             "expected vadr = %p, exmap_tbl[%d].vadr = %p\n",
 +             vadr, idx, ent->vadr);
 +      up_write(&mmu->exmap_sem);
 +      return -EINVAL;
 +
 +up_out:
 +      up_write(&mmu->exmap_sem);
 +      return total;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_exunmap);
 +
 +void omap_mmu_exmap_flush(struct omap_mmu *mmu)
 +{
 +      struct exmap_tbl *ent;
 +
 +      down_write(&mmu->exmap_sem);
 +
 +      /* clearing TLB entry */
 +      omap_mmu_gflush(mmu);
 +
 +      omap_mmu_for_each_tlb_entry(mmu, ent)
 +              if (ent->valid && !ent->prsvd)
 +                      unmap_free_arm(ent);
 +
 +      /* flush TLB */
 +      if (likely(mmu->membase))
 +              flush_tlb_kernel_range(mmu->membase + mmu->memsize,
 +                                     mmu->membase + (1 << mmu->addrspace));
 +
 +      up_write(&mmu->exmap_sem);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_exmap_flush);
 +
 +void exmap_setup_preserved_mem_page(struct omap_mmu *mmu, void *buf,
 +                                  unsigned long dspadr, int index)
 +{
 +      unsigned long phys;
 +      void *virt;
 +      struct omap_mmu_tlb_entry tlb_ent;
 +
 +      phys = __pa(buf);
 +      virt = omap_mmu_to_virt(mmu, dspadr);
 +      exmap_set_armmmu((unsigned long)virt, phys, PAGE_SIZE);
 +      INIT_EXMAP_TBL_ENTRY_4KB_PRESERVED(mmu->exmap_tbl + index, buf, virt);
 +      INIT_TLB_ENTRY_4KB_PRESERVED(&tlb_ent, dspadr, phys);
 +      omap_mmu_load_pte_entry(mmu, &tlb_ent);
 +}
 +EXPORT_SYMBOL_GPL(exmap_setup_preserved_mem_page);
 +
 +void exmap_clear_mem_page(struct omap_mmu *mmu, unsigned long dspadr)
 +{
 +      void *virt = omap_mmu_to_virt(mmu, dspadr);
 +
 +      exmap_clear_armmmu((unsigned long)virt, PAGE_SIZE);
 +      /* DSP MMU is shutting down. not handled here. */
 +}
 +EXPORT_SYMBOL_GPL(exmap_clear_mem_page);
 +
 +static void omap_mmu_reset(struct omap_mmu *mmu)
 +{
 +      int i;
 +
 +      omap_mmu_write_reg(mmu, 0x2, MMU_SYSCONFIG);
 +
 +      for (i = 0; i < 10000; i++)
 +              if (likely(omap_mmu_read_reg(mmu, MMU_SYSSTATUS) & 0x1))
 +                      break;
 +}
 +
 +void omap_mmu_disable(struct omap_mmu *mmu)
 +{
 +      omap_mmu_write_reg(mmu, 0x00, MMU_CNTL);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_disable);
 +
 +void omap_mmu_enable(struct omap_mmu *mmu, int reset)
 +{
 +      u32 val = MMU_CNTL_MMUENABLE;
 +      u32 pa = (u32)virt_to_phys(mmu->twl_mm->pgd);
 +
 +      if (likely(reset))
 +              omap_mmu_reset(mmu);
 +
 +      if (mmu->ops->pte_get_attr) {
 +              omap_mmu_write_reg(mmu, pa, MMU_TTB);
 +              val |= MMU_CNTL_TWLENABLE;
 +      }
 +
 +      omap_mmu_write_reg(mmu, val, MMU_CNTL);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_enable);
 +
 +static irqreturn_t omap_mmu_interrupt(int irq, void *dev_id)
 +{
 +      struct omap_mmu *mmu = dev_id;
 +
 +      if (likely(mmu->ops->interrupt))
 +              mmu->ops->interrupt(mmu);
 +
 +      return IRQ_HANDLED;
 +}
 +
 +static int omap_mmu_init(struct omap_mmu *mmu)
 +{
 +      struct omap_mmu_tlb_lock tlb_lock;
 +      int ret = 0;
 +
 +      clk_enable(mmu->clk);
 +      omap_dsp_request_mem();
 +      down_write(&mmu->exmap_sem);
 +
 +      ret = request_irq(mmu->irq, omap_mmu_interrupt, IRQF_DISABLED,
 +                        mmu->name,  mmu);
 +      if (ret < 0) {
 +              printk(KERN_ERR
 +                     "failed to register MMU interrupt: %d\n", ret);
 +              goto fail;
 +      }
 +
 +      omap_mmu_disable(mmu);  /* clear all */
 +      udelay(100);
 +      omap_mmu_enable(mmu, 1);
 +
 +      memset(&tlb_lock, 0, sizeof(struct omap_mmu_tlb_lock));
 +      omap_mmu_set_tlb_lock(mmu, &tlb_lock);
 +
 +      if (unlikely(mmu->ops->startup))
 +              ret = mmu->ops->startup(mmu);
 + fail:
 +      up_write(&mmu->exmap_sem);
 +      omap_dsp_release_mem();
 +      clk_disable(mmu->clk);
 +
 +      return ret;
 +}
 +
 +static void omap_mmu_shutdown(struct omap_mmu *mmu)
 +{
 +      free_irq(mmu->irq, mmu);
 +
 +      if (unlikely(mmu->ops->shutdown))
 +              mmu->ops->shutdown(mmu);
 +
 +      omap_mmu_exmap_flush(mmu);
 +      omap_mmu_disable(mmu); /* clear all */
 +}
 +
 +/*
 + * omap_mmu_mem_enable() / disable()
 + */
 +int omap_mmu_mem_enable(struct omap_mmu *mmu, void *addr)
 +{
 +      if (unlikely(mmu->ops->mem_enable))
 +              return mmu->ops->mem_enable(mmu, addr);
 +
 +      down_read(&mmu->exmap_sem);
 +      return 0;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_mem_enable);
 +
 +void omap_mmu_mem_disable(struct omap_mmu *mmu, void *addr)
 +{
 +      if (unlikely(mmu->ops->mem_disable)) {
 +              mmu->ops->mem_disable(mmu, addr);
 +              return;
 +      }
 +
 +      up_read(&mmu->exmap_sem);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_mem_disable);
 +
 +/*
 + * dsp_mem file operations
 + */
 +static ssize_t intmem_read(struct omap_mmu *mmu, char *buf, size_t count,
 +                         loff_t *ppos)
 +{
 +      unsigned long p = *ppos;
 +      void *vadr = omap_mmu_to_virt(mmu, p);
 +      ssize_t size = mmu->memsize;
 +      ssize_t read;
 +
 +      if (p >= size)
 +              return 0;
 +      clk_enable(mmu->memclk);
 +      read = count;
 +      if (count > size - p)
 +              read = size - p;
 +      if (copy_to_user(buf, vadr, read)) {
 +              read = -EFAULT;
 +              goto out;
 +      }
 +      *ppos += read;
 +out:
 +      clk_disable(mmu->memclk);
 +      return read;
 +}
 +
 +static ssize_t exmem_read(struct omap_mmu *mmu, char *buf, size_t count,
 +                        loff_t *ppos)
 +{
 +      unsigned long p = *ppos;
 +      void *vadr = omap_mmu_to_virt(mmu, p);
 +
 +      if (!exmap_valid(mmu, vadr, count)) {
 +              printk(KERN_ERR
 +                     "MMU: DSP address %08lx / size %08x "
 +                     "is not valid!\n", p, count);
 +              return -EFAULT;
 +      }
 +      if (count > (1 << mmu->addrspace) - p)
 +              count = (1 << mmu->addrspace) - p;
 +      if (copy_to_user(buf, vadr, count))
 +              return -EFAULT;
 +      *ppos += count;
 +
 +      return count;
 +}
 +
 +static ssize_t omap_mmu_mem_read(struct kobject *kobj, char *buf,
 +                               loff_t offset, size_t count)
 +{
 +      struct device *dev = to_dev(kobj);
 +      struct omap_mmu *mmu = dev_get_drvdata(dev);
 +      unsigned long p = (unsigned long)offset;
 +      void *vadr = omap_mmu_to_virt(mmu, p);
 +      int ret;
 +
 +      if (omap_mmu_mem_enable(mmu, vadr) < 0)
 +              return -EBUSY;
 +
 +      if (p < mmu->memsize)
 +              ret = intmem_read(mmu, buf, count, &offset);
 +      else
 +              ret = exmem_read(mmu, buf, count, &offset);
 +
 +      omap_mmu_mem_disable(mmu, vadr);
 +
 +      return ret;
 +}
 +
 +static ssize_t intmem_write(struct omap_mmu *mmu, const char *buf, size_t count,
 +                          loff_t *ppos)
 +{
 +      unsigned long p = *ppos;
 +      void *vadr = omap_mmu_to_virt(mmu, p);
 +      ssize_t size = mmu->memsize;
 +      ssize_t written;
 +
 +      if (p >= size)
 +              return 0;
 +      clk_enable(mmu->memclk);
 +      written = count;
 +      if (count > size - p)
 +              written = size - p;
 +      if (copy_from_user(vadr, buf, written)) {
 +              written = -EFAULT;
 +              goto out;
 +      }
 +      *ppos += written;
 +out:
 +      clk_disable(mmu->memclk);
 +      return written;
 +}
 +
 +static ssize_t exmem_write(struct omap_mmu *mmu, char *buf, size_t count,
 +                         loff_t *ppos)
 +{
 +      unsigned long p = *ppos;
 +      void *vadr = omap_mmu_to_virt(mmu, p);
 +
 +      if (!exmap_valid(mmu, vadr, count)) {
 +              printk(KERN_ERR
 +                     "MMU: DSP address %08lx / size %08x "
 +                     "is not valid!\n", p, count);
 +              return -EFAULT;
 +      }
 +      if (count > (1 << mmu->addrspace) - p)
 +              count = (1 << mmu->addrspace) - p;
 +      if (copy_from_user(vadr, buf, count))
 +              return -EFAULT;
 +      *ppos += count;
 +
 +      return count;
 +}
 +
 +static ssize_t omap_mmu_mem_write(struct kobject *kobj, char *buf,
 +                                loff_t offset, size_t count)
 +{
 +      struct device *dev = to_dev(kobj);
 +      struct omap_mmu *mmu = dev_get_drvdata(dev);
 +      unsigned long p = (unsigned long)offset;
 +      void *vadr = omap_mmu_to_virt(mmu, p);
 +      int ret;
 +
 +      if (omap_mmu_mem_enable(mmu, vadr) < 0)
 +              return -EBUSY;
 +
 +      if (p < mmu->memsize)
 +              ret = intmem_write(mmu, buf, count, &offset);
 +      else
 +              ret = exmem_write(mmu, buf, count, &offset);
 +
 +      omap_mmu_mem_disable(mmu, vadr);
 +
 +      return ret;
 +}
 +
 +static struct bin_attribute dev_attr_mem = {
 +      .attr   = {
 +              .name   = "mem",
 +              .owner  = THIS_MODULE,
 +              .mode   = S_IRUSR | S_IWUSR | S_IRGRP,
 +      },
 +
 +      .read   = omap_mmu_mem_read,
 +      .write  = omap_mmu_mem_write,
 +};
 +
 +/* To be obsolete for backward compatibility */
 +ssize_t __omap_mmu_mem_read(struct omap_mmu *mmu, char *buf,
 +                          loff_t offset, size_t count)
 +{
 +      return omap_mmu_mem_read(&mmu->dev.kobj, buf, offset, count);
 +}
 +EXPORT_SYMBOL_GPL(__omap_mmu_mem_read);
 +
 +ssize_t __omap_mmu_mem_write(struct omap_mmu *mmu, char *buf,
 +                           loff_t offset, size_t count)
 +{
 +      return omap_mmu_mem_write(&mmu->dev.kobj, buf, offset, count);
 +}
 +EXPORT_SYMBOL_GPL(__omap_mmu_mem_write);
 +
 +/*
 + * sysfs files
 + */
 +static ssize_t omap_mmu_show(struct device *dev, struct device_attribute *attr,
 +                           char *buf)
 +{
 +      struct omap_mmu *mmu = dev_get_drvdata(dev);
 +      struct omap_mmu_tlb_lock tlb_lock;
 +      int ret = -EIO;
 +
 +      clk_enable(mmu->clk);
 +      omap_dsp_request_mem();
 +
 +      down_read(&mmu->exmap_sem);
 +
 +      omap_mmu_get_tlb_lock(mmu, &tlb_lock);
 +
 +      if (likely(mmu->ops->show))
 +              ret = mmu->ops->show(mmu, buf, &tlb_lock);
 +
 +      /* restore victim entry */
 +      omap_mmu_set_tlb_lock(mmu, &tlb_lock);
 +
 +      up_read(&mmu->exmap_sem);
 +      omap_dsp_release_mem();
 +      clk_disable(mmu->clk);
 +
 +      return ret;
 +}
 +
 +static DEVICE_ATTR(mmu, S_IRUGO, omap_mmu_show, NULL);
 +
 +static ssize_t exmap_show(struct device *dev, struct device_attribute *attr,
 +                        char *buf)
 +{
 +      struct omap_mmu *mmu = dev_get_drvdata(dev);
 +      struct exmap_tbl *ent;
 +      int len;
 +      int i = 0;
 +
 +      down_read(&mmu->exmap_sem);
 +      len = sprintf(buf, "  dspadr     size         buf     size uc\n");
 +                       /* 0x300000 0x123000  0xc0171000 0x100000  0*/
 +
 +      omap_mmu_for_each_tlb_entry(mmu, ent) {
 +              void *vadr;
 +              unsigned long size;
 +              enum exmap_type type;
 +              int idx;
 +
 +              /* find a top of link */
 +              if (!ent->valid || (ent->link.prev >= 0))
 +                      continue;
 +
 +              vadr = ent->vadr;
 +              type = ent->type;
 +              size = 0;
 +              idx = i;
 +              do {
 +                      ent = mmu->exmap_tbl + idx;
 +                      size += PAGE_SIZE << ent->order;
 +              } while ((idx = ent->link.next) >= 0);
 +
 +              len += sprintf(buf + len, "0x%06lx %#8lx",
 +                             virt_to_omap_mmu(mmu, vadr), size);
 +
 +              if (type == EXMAP_TYPE_FB) {
 +                      len += sprintf(buf + len, "    framebuf\n");
 +              } else {
 +                      len += sprintf(buf + len, "\n");
 +                      idx = i;
 +                      do {
 +                              ent = mmu->exmap_tbl + idx;
 +                              len += sprintf(buf + len,
 +                                             /* 0xc0171000 0x100000  0*/
 +                                             "%19s0x%8p %#8lx %2d\n",
 +                                             "", ent->buf,
 +                                             PAGE_SIZE << ent->order,
 +                                             ent->usecount);
 +                      } while ((idx = ent->link.next) >= 0);
 +              }
 +
 +              i++;
 +      }
 +
 +      up_read(&mmu->exmap_sem);
 +      return len;
 +}
 +
 +static ssize_t exmap_store(struct device *dev, struct device_attribute *attr,
 +                         const char *buf,
 +                         size_t count)
 +{
 +      struct omap_mmu *mmu = dev_get_drvdata(dev);
 +      unsigned long base = 0, len = 0;
 +      int ret;
 +
 +      sscanf(buf, "%lx %lx", &base, &len);
 +
 +      if (!base)
 +              return -EINVAL;
 +
 +      if (len) {
 +              /* Add the mapping */
 +              ret = omap_mmu_exmap(mmu, base, 0, len, EXMAP_TYPE_MEM);
 +              if (ret < 0)
 +                      return ret;
 +      } else {
 +              /* Remove the mapping */
 +              ret = omap_mmu_exunmap(mmu, base);
 +              if (ret < 0)
 +                      return ret;
 +      }
 +
 +      return count;
 +}
 +
 +static DEVICE_ATTR(exmap, S_IRUGO | S_IWUSR, exmap_show, exmap_store);
 +
 +static ssize_t mempool_show(struct class *class, char *buf)
 +{
 +      int min_nr_1M = 0, curr_nr_1M = 0;
 +      int min_nr_64K = 0, curr_nr_64K = 0;
 +      int total = 0;
 +
 +      if (likely(mempool_1M)) {
 +              min_nr_1M  = mempool_1M->min_nr;
 +              curr_nr_1M = mempool_1M->curr_nr;
 +              total += min_nr_1M * SZ_1M;
 +      }
 +      if (likely(mempool_64K)) {
 +              min_nr_64K  = mempool_64K->min_nr;
 +              curr_nr_64K = mempool_64K->curr_nr;
 +              total += min_nr_64K * SZ_64K;
 +      }
 +
 +      return sprintf(buf,
 +                     "0x%x\n"
 +                     "1M  buffer: %d (%d free)\n"
 +                     "64K buffer: %d (%d free)\n",
 +                     total, min_nr_1M, curr_nr_1M, min_nr_64K, curr_nr_64K);
 +}
 +
 +
 +static CLASS_ATTR(mempool, S_IRUGO, mempool_show, NULL);
 +
 +static void omap_mmu_class_dev_release(struct device *dev)
 +{
 +}
 +
 +static struct class omap_mmu_class = {
 +      .name           = "mmu",
 +      .dev_release    = omap_mmu_class_dev_release,
 +};
 +
 +int omap_mmu_register(struct omap_mmu *mmu)
 +{
 +      int ret;
 +
 +      mmu->dev.class = &omap_mmu_class;
 +      strlcpy(mmu->dev.bus_id, mmu->name, KOBJ_NAME_LEN);
 +      dev_set_drvdata(&mmu->dev, mmu);
 +
 +      mmu->exmap_tbl = kzalloc(sizeof(struct exmap_tbl) * mmu->nr_tlb_entries,
 +                               GFP_KERNEL);
 +      if (!mmu->exmap_tbl)
 +              return -ENOMEM;
 +
 +      if (mmu->ops->pte_get_attr) {
 +              struct mm_struct *mm =  mm_alloc();
 +              if (!mm) {
 +                      ret = -ENOMEM;
 +                      goto err_mm_alloc;
 +              }
 +              mmu->twl_mm = mm;
 +      }
 +
 +      ret = device_register(&mmu->dev);
 +      if (unlikely(ret))
 +              goto err_dev_register;
 +
 +      init_rwsem(&mmu->exmap_sem);
 +
 +      ret = omap_mmu_read_reg(mmu, MMU_REVISION);
 +      printk(KERN_NOTICE "MMU: OMAP %s MMU initialized (HW v%d.%d)\n",
 +             mmu->name, (ret >> 4) & 0xf, ret & 0xf);
 +
 +      ret = omap_mmu_init(mmu);
 +      if (unlikely(ret))
 +              goto err_mmu_init;
 +
 +      ret = device_create_file(&mmu->dev, &dev_attr_mmu);
 +      if (unlikely(ret))
 +              goto err_dev_create_mmu;
 +      ret = device_create_file(&mmu->dev, &dev_attr_exmap);
 +      if (unlikely(ret))
 +              goto err_dev_create_exmap;
 +
 +      if (likely(mmu->membase)) {
 +              dev_attr_mem.size = mmu->memsize;
 +              ret = device_create_bin_file(&mmu->dev,
 +                                           &dev_attr_mem);
 +              if (unlikely(ret))
 +                      goto err_bin_create_mem;
 +      }
 +
 +      return 0;
 +
 +err_bin_create_mem:
 +      device_remove_file(&mmu->dev, &dev_attr_exmap);
 +err_dev_create_exmap:
 +      device_remove_file(&mmu->dev, &dev_attr_mmu);
 +err_dev_create_mmu:
 +      omap_mmu_shutdown(mmu);
 +err_mmu_init:
 +      device_unregister(&mmu->dev);
 +err_dev_register:
 +      kfree(mmu->twl_mm);
 +      mmu->twl_mm = NULL;
 +err_mm_alloc:
 +      kfree(mmu->exmap_tbl);
 +      mmu->exmap_tbl = NULL;
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_register);
 +
 +void omap_mmu_unregister(struct omap_mmu *mmu)
 +{
 +      omap_mmu_shutdown(mmu);
 +      omap_mmu_kmem_release();
 +
 +      device_remove_file(&mmu->dev, &dev_attr_mmu);
 +      device_remove_file(&mmu->dev, &dev_attr_exmap);
 +
 +      if (likely(mmu->membase))
 +              device_remove_bin_file(&mmu->dev,
 +                                           &dev_attr_mem);
 +
 +      kfree(mmu->exmap_tbl);
 +      mmu->exmap_tbl = NULL;
 +
 +      if (mmu->ops->pte_get_attr) {
 +              if (mmu->twl_mm) {
 +                      __mmdrop(mmu->twl_mm);
 +                      mmu->twl_mm = NULL;
 +              }
 +      }
 +
 +      device_unregister(&mmu->dev);
 +}
 +EXPORT_SYMBOL_GPL(omap_mmu_unregister);
 +
 +static int __init omap_mmu_class_init(void)
 +{
 +      int ret = class_register(&omap_mmu_class);
 +      if (!ret)
 +              ret = class_create_file(&omap_mmu_class, &class_attr_mempool);
 +
 +      return ret;
 +}
 +
 +static void __exit omap_mmu_class_exit(void)
 +{
 +      class_remove_file(&omap_mmu_class, &class_attr_mempool);
 +      class_unregister(&omap_mmu_class);
 +}
 +
 +subsys_initcall(omap_mmu_class_init);
 +module_exit(omap_mmu_class_exit);
 +
 +MODULE_LICENSE("GPL");
Simple merge
Simple merge
Simple merge
@@@ -64,8 -62,11 +67,10 @@@ obj-$(CONFIG_GAMEPORT)              += input/gamepo
  obj-$(CONFIG_INPUT)           += input/
  obj-$(CONFIG_I2O)             += message/
  obj-$(CONFIG_RTC_LIB)         += rtc/
 -obj-y                         += i2c/
  obj-$(CONFIG_W1)              += w1/
+ obj-$(CONFIG_POWER_SUPPLY)    += power/
  obj-$(CONFIG_HWMON)           += hwmon/
+ obj-$(CONFIG_WATCHDOG)                += char/watchdog/
  obj-$(CONFIG_PHONE)           += telephony/
  obj-$(CONFIG_MD)              += md/
  obj-$(CONFIG_BT)              += bluetooth/
@@@ -152,18 -134,15 +152,17 @@@ static int omap_wdt_open(struct inode *
        }
  
        /* initialize prescaler */
 -      while (omap_readl(OMAP_WATCHDOG_WPS) & 0x01)
 +      while (omap_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
                cpu_relax();
 -      omap_writel((1 << 5) | (PTV << 2), OMAP_WATCHDOG_CNTRL);
 -      while (omap_readl(OMAP_WATCHDOG_WPS) & 0x01)
 +      omap_writel((1 << 5) | (PTV << 2), base + OMAP_WATCHDOG_CNTRL);
 +      while (omap_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
                cpu_relax();
  
 -      omap_wdt_set_timeout();
 -      omap_wdt_enable();
 +      file->private_data = (void *) wdev;
 +
 +      omap_wdt_set_timeout(wdev);
 +      omap_wdt_enable(wdev);
-       return 0;
+       return nonseekable_open(inode, file);
  }
  
  static int omap_wdt_release(struct inode *inode, struct file *file)
Simple merge
@@@ -100,42 -116,8 +116,42 @@@ config TPS6501
          This driver can also be built as a module.  If so, the module
          will be called tps65010.
  
 +config SENSORS_TLV320AIC23
 +      tristate "Texas Instruments TLV320AIC23 Codec"
 +      depends on I2C && I2C_OMAP
 +      help
 +        If you say yes here you get support for the I2C control
 +        interface for Texas Instruments TLV320AIC23 audio codec.
 +
 +config GPIOEXPANDER_OMAP
 +      bool "GPIO Expander PCF8574PWR for OMAP"
 +      depends on I2C && (ARCH_OMAP16XX || ARCH_OMAP24XX)
 +      help
 +        If you say yes here you get support for I/O expander calls
 +        to configure IrDA, Camera and audio devices.
 +
 +config MENELAUS
 +      bool "TWL92330/Menelaus PM chip"
 +      depends on I2C=y && ARCH_OMAP24XX
 +      help
 +        If you say yes here you get support for the Texas Instruments
 +        TWL92330/Menelaus Power Management chip. This include voltage
 +        regulators, Dual slot memory card tranceivers, real-time clock
 +        and other features that are often used in portable devices like
 +        cell phones and PDAs.
 +
 +config TWL4030_CORE
 +      bool "TI's TWL4030 companion chip Core Driver Support"
 +      depends on I2C=y && ARCH_OMAP24XX
 +      help
 +        Say yes here if you have TWL4030 chip on your board
 +
 +config TWL4030_GPIO
 +      bool "TWL4030 GPIO Driver"
 +      depends on TWL4030_CORE
 +
  config SENSORS_M41T00
-       tristate "ST M41T00 RTC chip"
+       tristate "ST M41T00 RTC chip (DEPRECATED)"
        depends on PPC32
        help
          If you say yes here you get support for the ST M41T00 RTC chip.
@@@ -12,12 -13,8 +13,13 @@@ obj-$(CONFIG_SENSORS_PCF8574)       += pcf857
  obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
  obj-$(CONFIG_ISP1301_OMAP)    += isp1301_omap.o
  obj-$(CONFIG_TPS65010)                += tps65010.o
 +obj-$(CONFIG_SENSORS_TLV320AIC23) += tlv320aic23.o
 +obj-$(CONFIG_GPIOEXPANDER_OMAP)       += gpio_expander_omap.o
  obj-$(CONFIG_MENELAUS)                += menelaus.o
+ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
 +obj-$(CONFIG_TWL4030_CORE)      += twl4030_core.o
 +obj-$(CONFIG_TWL4030_GPIO)    += twl4030_gpio.o
 +obj-$(CONFIG_RTC_X1205_I2C)   += x1205.o
  
  ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
  EXTRA_CFLAGS += -DDEBUG
Simple merge
Simple merge
Simple merge
@@@ -14,10 -14,9 +14,11 @@@ obj-$(CONFIG_LEDS_S3C24XX)          += leds-s3c
  obj-$(CONFIG_LEDS_AMS_DELTA)          += leds-ams-delta.o
  obj-$(CONFIG_LEDS_NET48XX)            += leds-net48xx.o
  obj-$(CONFIG_LEDS_WRAP)                       += leds-wrap.o
 +obj-$(CONFIG_LEDS_OMAP)                       += leds-omap.o
 +obj-$(CONFIG_LEDS_OMAP_PWM)           += leds-omap-pwm.o
  obj-$(CONFIG_LEDS_H1940)              += leds-h1940.o
  obj-$(CONFIG_LEDS_COBALT)             += leds-cobalt.o
+ obj-$(CONFIG_LEDS_GPIO)                       += leds-gpio.o
  
  # LED Triggers
  obj-$(CONFIG_LEDS_TRIGGER_TIMER)      += ledtrig-timer.o
index 6b195d6,0000000..7562c6d
mode 100644,000000..100644
--- /dev/null
@@@ -1,354 -1,0 +1,358 @@@
- static ssize_t omap_pwm_led_on_period_show(struct class_device *cdev, char *buf)
 +/* drivers/leds/leds-omap_pwm.c
 + *
 + * Driver to blink LEDs using OMAP PWM timers
 + *
 + * Copyright (C) 2006 Nokia Corporation
 + * Author: Timo Teras
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 +*/
 +
 +#include <linux/kernel.h>
 +#include <linux/init.h>
 +#include <linux/err.h>
 +#include <linux/platform_device.h>
 +#include <linux/leds.h>
 +#include <linux/ctype.h>
 +#include <asm/delay.h>
 +#include <asm/arch/board.h>
 +#include <asm/arch/dmtimer.h>
 +
 +struct omap_pwm_led {
 +      struct led_classdev cdev;
 +      struct omap_pwm_led_platform_data *pdata;
 +      struct omap_dm_timer *intensity_timer;
 +      struct omap_dm_timer *blink_timer;
 +      int powered;
 +      unsigned int on_period, off_period;
 +};
 +
 +static inline struct omap_pwm_led *pdev_to_omap_pwm_led(struct platform_device *pdev)
 +{
 +      return platform_get_drvdata(pdev);
 +}
 +
 +static inline struct omap_pwm_led *cdev_to_omap_pwm_led(struct led_classdev *led_cdev)
 +{
 +      return container_of(led_cdev, struct omap_pwm_led, cdev);
 +}
 +
 +static void omap_pwm_led_set_blink(struct omap_pwm_led *led)
 +{
 +      if (!led->powered)
 +              return;
 +
 +      if (led->on_period != 0 && led->off_period != 0) {
 +              unsigned long load_reg, cmp_reg;
 +
 +              load_reg = 32768 * (led->on_period + led->off_period) / 1000;
 +              cmp_reg = 32768 * led->on_period / 1000;
 +
 +              omap_dm_timer_stop(led->blink_timer);
 +              omap_dm_timer_set_load(led->blink_timer, 1, -load_reg);
 +              omap_dm_timer_set_match(led->blink_timer, 1, -cmp_reg);
 +              omap_dm_timer_set_pwm(led->blink_timer, 1, 1,
 +                                    OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 +              omap_dm_timer_write_counter(led->blink_timer, -2);
 +              omap_dm_timer_start(led->blink_timer);
 +      } else {
 +              omap_dm_timer_set_pwm(led->blink_timer, 1, 1,
 +                                    OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 +              omap_dm_timer_stop(led->blink_timer);
 +      }
 +}
 +
 +static void omap_pwm_led_power_on(struct omap_pwm_led *led)
 +{
 +      if (led->powered)
 +              return;
 +      led->powered = 1;
 +
 +      /* Select clock */
 +      omap_dm_timer_enable(led->intensity_timer);
 +      omap_dm_timer_set_source(led->intensity_timer, OMAP_TIMER_SRC_32_KHZ);
 +
 +      /* Turn voltage on */
 +      if (led->pdata->set_power != NULL)
 +              led->pdata->set_power(led->pdata, 1);
 +
 +      /* Enable PWM timers */
 +      if (led->blink_timer != NULL) {
 +              omap_dm_timer_enable(led->blink_timer);
 +              omap_dm_timer_set_source(led->blink_timer,
 +                                       OMAP_TIMER_SRC_32_KHZ);
 +              omap_pwm_led_set_blink(led);
 +      }
 +
 +      omap_dm_timer_set_load(led->intensity_timer, 1, 0xffffff00);
 +}
 +
 +static void omap_pwm_led_power_off(struct omap_pwm_led *led)
 +{
 +      if (!led->powered)
 +              return;
 +      led->powered = 0;
 +
 +      /* Everything off */
 +      omap_dm_timer_stop(led->intensity_timer);
 +      omap_dm_timer_disable(led->intensity_timer);
 +
 +      if (led->blink_timer != NULL) {
 +              omap_dm_timer_stop(led->blink_timer);
 +              omap_dm_timer_disable(led->blink_timer);
 +      }
 +
 +      if (led->pdata->set_power != NULL)
 +              led->pdata->set_power(led->pdata, 0);
 +}
 +
 +static void omap_pwm_led_set_pwm_cycle(struct omap_pwm_led *led, int cycle)
 +{
 +      int n;
 +
 +      if (cycle == 0)
 +              n = 0xff;
 +      else    n = cycle - 1;
 +
 +      if (cycle == LED_FULL) {
 +              omap_dm_timer_set_pwm(led->intensity_timer, 1, 1,
 +                                    OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 +              omap_dm_timer_stop(led->intensity_timer);
 +      } else {
 +              omap_dm_timer_set_pwm(led->intensity_timer, 0, 1,
 +                                    OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
 +              omap_dm_timer_set_match(led->intensity_timer, 1,
 +                                      (0xffffff00) | cycle);
 +              omap_dm_timer_start(led->intensity_timer);
 +      }
 +}
 +
 +static void omap_pwm_led_set(struct led_classdev *led_cdev,
 +                           enum led_brightness value)
 +{
 +      struct omap_pwm_led *led = cdev_to_omap_pwm_led(led_cdev);
 +
 +      if (value != LED_OFF) {
 +              omap_pwm_led_power_on(led);
 +              omap_pwm_led_set_pwm_cycle(led, value);
 +      } else {
 +              omap_pwm_led_power_off(led);
 +      }
 +}
 +
-       struct led_classdev *led_cdev = class_get_devdata(cdev);
++static ssize_t omap_pwm_led_on_period_show(struct device *dev,
++                              struct device_attribute *attr, char *buf)
 +{
- static ssize_t omap_pwm_led_on_period_store(struct class_device *cdev,
-                                           const char *buf, size_t size)
++      struct led_classdev *led_cdev = dev_get_drvdata(dev);
 +      struct omap_pwm_led *led = cdev_to_omap_pwm_led(led_cdev);
 +
 +      return sprintf(buf, "%u\n", led->on_period) + 1;
 +}
 +
-       struct led_classdev *led_cdev = class_get_devdata(cdev);
++static ssize_t omap_pwm_led_on_period_store(struct device *dev,
++                              struct device_attribute *attr,
++                              const char *buf, size_t size)
 +{
- static ssize_t omap_pwm_led_off_period_show(struct class_device *cdev, char *buf)
++      struct led_classdev *led_cdev = dev_get_drvdata(dev);
 +      struct omap_pwm_led *led = cdev_to_omap_pwm_led(led_cdev);
 +      int ret = -EINVAL;
 +      unsigned long val;
 +      char *after;
 +      size_t count;
 +
 +      val = simple_strtoul(buf, &after, 10);
 +      count = after - buf;
 +      if (*after && isspace(*after))
 +              count++;
 +
 +      if (count == size) {
 +              led->on_period = val;
 +              omap_pwm_led_set_blink(led);
 +              ret = count;
 +      }
 +
 +      return ret;
 +}
 +
-       struct led_classdev *led_cdev = class_get_devdata(cdev);
++static ssize_t omap_pwm_led_off_period_show(struct device *dev,
++                              struct device_attribute *attr, char *buf)
 +{
- static ssize_t omap_pwm_led_off_period_store(struct class_device *cdev,
-                                            const char *buf, size_t size)
++      struct led_classdev *led_cdev = dev_get_drvdata(dev);
 +      struct omap_pwm_led *led = cdev_to_omap_pwm_led(led_cdev);
 +
 +      return sprintf(buf, "%u\n", led->off_period) + 1;
 +}
 +
-       struct led_classdev *led_cdev = class_get_devdata(cdev);
++static ssize_t omap_pwm_led_off_period_store(struct device *dev,
++                                      struct device_attribute *attr,
++                                      const char *buf, size_t size)
 +{
- static CLASS_DEVICE_ATTR(on_period, 0644, omap_pwm_led_on_period_show,
-                        omap_pwm_led_on_period_store);
- static CLASS_DEVICE_ATTR(off_period, 0644, omap_pwm_led_off_period_show,
-                        omap_pwm_led_off_period_store);
++      struct led_classdev *led_cdev = dev_get_drvdata(dev);
 +      struct omap_pwm_led *led = cdev_to_omap_pwm_led(led_cdev);
 +      int ret = -EINVAL;
 +      unsigned long val;
 +      char *after;
 +      size_t count;
 +
 +      val = simple_strtoul(buf, &after, 10);
 +      count = after - buf;
 +      if (*after && isspace(*after))
 +              count++;
 +
 +      if (count == size) {
 +              led->off_period = val;
 +              omap_pwm_led_set_blink(led);
 +              ret = count;
 +      }
 +
 +      return ret;
 +}
 +
-               ret = class_device_create_file(led->cdev.class_dev,
-                                              &class_device_attr_on_period);
++static DEVICE_ATTR(on_period, 0644, omap_pwm_led_on_period_show,
++                              omap_pwm_led_on_period_store);
++static DEVICE_ATTR(off_period, 0644, omap_pwm_led_off_period_show,
++                              omap_pwm_led_off_period_store);
 +
 +static int omap_pwm_led_probe(struct platform_device *pdev)
 +{
 +      struct omap_pwm_led_platform_data *pdata = pdev->dev.platform_data;
 +      struct omap_pwm_led *led;
 +      int ret;
 +
 +      led = kzalloc(sizeof(struct omap_pwm_led), GFP_KERNEL);
 +      if (led == NULL) {
 +              dev_err(&pdev->dev, "No memory for device\n");
 +              return -ENOMEM;
 +      }
 +
 +      platform_set_drvdata(pdev, led);
 +      led->cdev.brightness_set = omap_pwm_led_set;
 +      led->cdev.default_trigger = NULL;
 +      led->cdev.name = pdata->name;
 +      led->pdata = pdata;
 +
 +      dev_info(&pdev->dev, "OMAP PWM LED (%s) at GP timer %d/%d\n",
 +               pdata->name, pdata->intensity_timer, pdata->blink_timer);
 +
 +      /* register our new led device */
 +      ret = led_classdev_register(&pdev->dev, &led->cdev);
 +      if (ret < 0) {
 +              dev_err(&pdev->dev, "led_classdev_register failed\n");
 +              goto error_classdev;
 +      }
 +
 +      /* get related dm timers */
 +      led->intensity_timer = omap_dm_timer_request_specific(pdata->intensity_timer);
 +      if (led->intensity_timer == NULL) {
 +              dev_err(&pdev->dev, "failed to request intensity pwm timer\n");
 +              ret = -ENODEV;
 +              goto error_intensity;
 +      }
 +      omap_dm_timer_disable(led->intensity_timer);
 +
 +      if (pdata->blink_timer != 0) {
 +              led->blink_timer = omap_dm_timer_request_specific(pdata->blink_timer);
 +              if (led->blink_timer == NULL) {
 +                      dev_err(&pdev->dev, "failed to request blinking pwm timer\n");
 +                      ret = -ENODEV;
 +                      goto error_blink1;
 +              }
 +              omap_dm_timer_disable(led->blink_timer);
 +
-               ret = class_device_create_file(led->cdev.class_dev,
-                                               &class_device_attr_off_period);
++              ret = device_create_file(led->cdev.dev,
++                                             &dev_attr_on_period);
 +              if(ret)
 +                      goto error_blink2;
 +
-       class_device_remove_file(led->cdev.class_dev,
-                                &class_device_attr_on_period);
++              ret = device_create_file(led->cdev.dev,
++                                      &dev_attr_off_period);
 +              if(ret)
 +                      goto error_blink3;
 +
 +      }
 +
 +      return 0;
 +
 +error_blink3:
-       class_device_remove_file(led->cdev.class_dev,
-                                &class_device_attr_on_period);
-       class_device_remove_file(led->cdev.class_dev,
-                                &class_device_attr_off_period);
++      device_remove_file(led->cdev.dev,
++                               &dev_attr_on_period);
 +error_blink2:
 +      dev_err(&pdev->dev, "failed to create device file(s)\n");
 +error_blink1:
 +      omap_dm_timer_free(led->intensity_timer);
 +error_intensity:
 +      led_classdev_unregister(&led->cdev);
 +error_classdev:
 +      kfree(led);
 +      return ret;
 +}
 +
 +static int omap_pwm_led_remove(struct platform_device *pdev)
 +{
 +      struct omap_pwm_led *led = pdev_to_omap_pwm_led(pdev);
 +
++      device_remove_file(led->cdev.dev,
++                               &dev_attr_on_period);
++      device_remove_file(led->cdev.dev,
++                               &dev_attr_off_period);
 +      led_classdev_unregister(&led->cdev);
 +
 +      omap_pwm_led_set(&led->cdev, LED_OFF);
 +      if (led->blink_timer != NULL)
 +              omap_dm_timer_free(led->blink_timer);
 +      omap_dm_timer_free(led->intensity_timer);
 +      kfree(led);
 +
 +      return 0;
 +}
 +
 +#ifdef CONFIG_PM
 +static int omap_pwm_led_suspend(struct platform_device *pdev, pm_message_t state)
 +{
 +      struct omap_pwm_led *led = pdev_to_omap_pwm_led(pdev);
 +
 +      led_classdev_suspend(&led->cdev);
 +      return 0;
 +}
 +
 +static int omap_pwm_led_resume(struct platform_device *pdev)
 +{
 +      struct omap_pwm_led *led = pdev_to_omap_pwm_led(pdev);
 +
 +      led_classdev_resume(&led->cdev);
 +      return 0;
 +}
 +#else
 +#define omap_pwm_led_suspend NULL
 +#define omap_pwm_led_resume NULL
 +#endif
 +
 +static struct platform_driver omap_pwm_led_driver = {
 +      .probe          = omap_pwm_led_probe,
 +      .remove         = omap_pwm_led_remove,
 +      .suspend        = omap_pwm_led_suspend,
 +      .resume         = omap_pwm_led_resume,
 +      .driver         = {
 +              .name           = "omap_pwm_led",
 +              .owner          = THIS_MODULE,
 +      },
 +};
 +
 +static int __init omap_pwm_led_init(void)
 +{
 +      return platform_driver_register(&omap_pwm_led_driver);
 +}
 +
 +static void __exit omap_pwm_led_exit(void)
 +{
 +      platform_driver_unregister(&omap_pwm_led_driver);
 +}
 +
 +module_init(omap_pwm_led_init);
 +module_exit(omap_pwm_led_exit);
 +
 +MODULE_AUTHOR("Timo Teras");
 +MODULE_DESCRIPTION("OMAP PWM LED driver");
 +MODULE_LICENSE("GPL");
@@@ -323,24 -323,9 +323,24 @@@ config RADIO_ZOLTRIX_POR
        help
          Enter the I/O port of your Zoltrix radio card.
  
 +config RADIO_TEA5761
 +      tristate "Philips Semiconductors TEA5761 I2C FM Radio"
 +      select I2C
 +      select VIDEO_V4L2
 +      help
 +        Choose Y here if you have one of these AM/FM radio cards.
 +
 +        In order to control your radio card, you will need to use programs
 +        that are compatible with the Video For Linux 2 API.  Information on
 +        this API and pointers to "v4l" programs may be found at
 +        <file:Documentation/video4linux/API.html>.
 +
 +        To compile this driver as a module, choose M here: the
 +        module will be called radio-tea5761.
 +
  config USB_DSBR
-       tristate "D-Link USB FM radio support (EXPERIMENTAL)"
-       depends on USB && VIDEO_V4L2 && EXPERIMENTAL
+       tristate "D-Link/GemTek USB FM radio support"
+       depends on USB && VIDEO_V4L2
        ---help---
          Say Y here if you want to connect this type of radio to your
          computer's USB port. Note that the audio is not digital, and
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -232,8 -233,11 +233,21 @@@ config RTC_DRV_TWL9233
          platforms.  The support is integrated with the rest of
          the Menelaus driver; it's not separate module.
  
++config RTC_DRV_TWL4030
++      tristate "OMAP TWL4030 Real Time Clock"
++      depends on RTC_CLASS && TWL4030_CORE
++      help
++        If you say yes here you get support for internal Real-Time 
++        Clock of TWL4030 chip.
++
++        This driver can also be built as a module. If so, the module
++        will be called rtc-twl4030.
++
+ endif # I2C
  comment "SPI RTC drivers"
-       depends on RTC_CLASS
+ if SPI_MASTER
  
  config RTC_DRV_RS5C348
        tristate "Ricoh RS5C348A/B"
@@@ -25,20 -36,15 +36,16 @@@ obj-$(CONFIG_RTC_DRV_MAX6902)      += rtc-ma
  obj-$(CONFIG_RTC_DRV_OMAP)    += rtc-omap.o
  obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
  obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
+ obj-$(CONFIG_RTC_DRV_PL031)   += rtc-pl031.o
+ obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
+ obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
  obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
  obj-$(CONFIG_RTC_DRV_S3C)     += rtc-s3c.o
- obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
- obj-$(CONFIG_RTC_DRV_M48T86)  += rtc-m48t86.o
- obj-$(CONFIG_RTC_DRV_DS1553)  += rtc-ds1553.o
- obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
- obj-$(CONFIG_RTC_DRV_EP93XX)  += rtc-ep93xx.o
  obj-$(CONFIG_RTC_DRV_SA1100)  += rtc-sa1100.o
- obj-$(CONFIG_RTC_DRV_VR41XX)  += rtc-vr41xx.o
- obj-$(CONFIG_RTC_DRV_PL031)   += rtc-pl031.o
- obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
- obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
- obj-$(CONFIG_RTC_DRV_V3020)   += rtc-v3020.o
- obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
  obj-$(CONFIG_RTC_DRV_SH)      += rtc-sh.o
- obj-$(CONFIG_RTC_DRV_BFIN)    += rtc-bfin.o
+ obj-$(CONFIG_RTC_DRV_STK17TA8)        += rtc-stk17ta8.o
+ obj-$(CONFIG_RTC_DRV_TEST)    += rtc-test.o
 +obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
+ obj-$(CONFIG_RTC_DRV_V3020)   += rtc-v3020.o
+ obj-$(CONFIG_RTC_DRV_VR41XX)  += rtc-vr41xx.o
+ obj-$(CONFIG_RTC_DRV_X1205)   += rtc-x1205.o
Simple merge
Simple merge
@@@ -29,11 -32,7 +32,12 @@@ obj-$(CONFIG_SPI_XILINX)            += xilinx_spi
  # SPI protocol drivers (device/link on bus)
  obj-$(CONFIG_SPI_AT25)                += at25.o
  obj-$(CONFIG_SPI_SPIDEV)      += spidev.o
+ obj-$(CONFIG_SPI_TLE62X0)     += tle62x0.o
 +obj-$(CONFIG_SPI_TSC2101)     += tsc2101.o
 +obj-$(CONFIG_SPI_TSC2102)     += tsc2102.o
 +obj-$(CONFIG_SPI_TSC2301)     += tsc2301.o
 +tsc2301-objs                  := tsc2301-core.o
 +tsc2301-$(CONFIG_SPI_TSC2301_AUDIO)   += tsc2301-mixer.o
  #     ... add above this line ...
  
  # SPI slave controller drivers (upstream link)
Simple merge
Simple merge
Simple merge
index 644aada,0000000..3277df0
mode 100644,000000..100644
--- /dev/null
@@@ -1,977 -1,0 +1,962 @@@
- static void *musb_g_ep0_alloc_buffer(struct usb_ep *ep, unsigned bytes,
-                       dma_addr_t * dma, gfp_t gfp_flags)
- {
-       *dma = DMA_ADDR_INVALID;
-       return kmalloc(bytes, gfp_flags);
- }
- static void musb_g_ep0_free_buffer(struct usb_ep *ep, void *address,
-                       dma_addr_t dma, unsigned bytes)
- {
-       kfree(address);
- }
 +/******************************************************************
 + * Copyright 2005 Mentor Graphics Corporation
 + * Copyright (C) 2005-2006 by Texas Instruments
 + *
 + * This file is part of the Inventra Controller Driver for Linux.
 + *
 + * The Inventra Controller Driver for Linux is free software; you
 + * can redistribute it and/or modify it under the terms of the GNU
 + * General Public License version 2 as published by the Free Software
 + * Foundation.
 + *
 + * The Inventra Controller Driver for Linux is distributed in
 + * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 + * without even the implied warranty of MERCHANTABILITY or
 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 + * License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with The Inventra Controller Driver for Linux ; if not,
 + * write to the Free Software Foundation, Inc., 59 Temple Place,
 + * Suite 330, Boston, MA  02111-1307  USA
 + *
 + * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
 + * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
 + * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
 + * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
 + * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
 + * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
 + * NON-INFRINGEMENT.  MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
 + * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
 + * GRAPHICS SUPPORT CUSTOMER.
 + ******************************************************************/
 +
 +#include <linux/kernel.h>
 +#include <linux/list.h>
 +#include <linux/timer.h>
 +#include <linux/spinlock.h>
 +#include <linux/init.h>
 +#include <linux/device.h>
 +#include <linux/interrupt.h>
 +
 +#include "musbdefs.h"
 +
 +/* ep0 is always musb->aLocalEnd[0].ep_in */
 +#define       next_ep0_request(musb)  next_in_request(&(musb)->aLocalEnd[0])
 +
 +/*
 + * Locking note:  we use only the controller lock, for simpler correctness.
 + * It's always held with IRQs blocked.
 + *
 + * It protects the ep0 request queue as well as ep0_state, not just the
 + * controller and indexed registers.  And that lock stays held unless it
 + * needs to be dropped to allow reentering this driver ... like upcalls to
 + * the gadget driver, or adjusting endpoint halt status.
 + */
 +
 +static char *decode_ep0stage(u8 stage)
 +{
 +      switch(stage) {
 +      case MGC_END0_STAGE_SETUP:      return "idle";
 +      case MGC_END0_STAGE_TX:         return "in";
 +      case MGC_END0_STAGE_RX:         return "out";
 +      case MGC_END0_STAGE_ACKWAIT:    return "wait";
 +      case MGC_END0_STAGE_STATUSIN:   return "in/status";
 +      case MGC_END0_STAGE_STATUSOUT:  return "out/status";
 +      default:                        return "?";
 +      }
 +}
 +
 +/* handle a standard GET_STATUS request
 + * Context:  caller holds controller lock
 + */
 +static int service_tx_status_request(
 +      struct musb *musb,
 +      const struct usb_ctrlrequest *pControlRequest)
 +{
 +      void __iomem    *pBase = musb->pRegs;
 +      int handled = 1;
 +      u8 bResult[2], bEnd = 0;
 +      const u8 bRecip = pControlRequest->bRequestType & USB_RECIP_MASK;
 +
 +      bResult[1] = 0;
 +
 +      switch (bRecip) {
 +      case USB_RECIP_DEVICE:
 +              bResult[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
 +              bResult[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
 +#ifdef CONFIG_USB_MUSB_OTG
 +              if (musb->g.is_otg) {
 +                      bResult[0] |= musb->g.b_hnp_enable
 +                              << USB_DEVICE_B_HNP_ENABLE;
 +                      bResult[0] |= musb->g.a_alt_hnp_support
 +                              << USB_DEVICE_A_ALT_HNP_SUPPORT;
 +                      bResult[0] |= musb->g.a_hnp_support
 +                              << USB_DEVICE_A_HNP_SUPPORT;
 +              }
 +#endif
 +              break;
 +
 +      case USB_RECIP_INTERFACE:
 +              bResult[0] = 0;
 +              break;
 +
 +      case USB_RECIP_ENDPOINT: {
 +              int             is_in;
 +              struct musb_ep  *ep;
 +              u16             tmp;
 +              void __iomem    *regs;
 +
 +              bEnd = (u8) pControlRequest->wIndex;
 +              if (!bEnd) {
 +                      bResult[0] = 0;
 +                      break;
 +              }
 +
 +              is_in = bEnd & USB_DIR_IN;
 +              if (is_in) {
 +                      bEnd &= 0x0f;
 +                      ep = &musb->aLocalEnd[bEnd].ep_in;
 +              } else {
 +                      ep = &musb->aLocalEnd[bEnd].ep_out;
 +              }
 +              regs = musb->aLocalEnd[bEnd].regs;
 +
 +              if (bEnd >= MUSB_C_NUM_EPS || !ep->desc) {
 +                      handled = -EINVAL;
 +                      break;
 +              }
 +
 +              MGC_SelectEnd(pBase, bEnd);
 +              if (is_in)
 +                      tmp = musb_readw(regs, MGC_O_HDRC_TXCSR)
 +                                              & MGC_M_TXCSR_P_SENDSTALL;
 +              else
 +                      tmp = musb_readw(regs, MGC_O_HDRC_RXCSR)
 +                                              & MGC_M_RXCSR_P_SENDSTALL;
 +              MGC_SelectEnd(pBase, 0);
 +
 +              bResult[0] = tmp ? 1 : 0;
 +              } break;
 +
 +      default:
 +              /* class, vendor, etc ... delegate */
 +              handled = 0;
 +              break;
 +      }
 +
 +      /* fill up the fifo; caller updates csr0 */
 +      if (handled > 0) {
 +              u16     len = le16_to_cpu(pControlRequest->wLength);
 +
 +              if (len > 2)
 +                      len = 2;
 +              musb_write_fifo(&musb->aLocalEnd[0], len, bResult);
 +      }
 +
 +      return handled;
 +}
 +
 +/*
 + * handle a control-IN request, the end0 buffer contains the current request
 + * that is supposed to be a standard control request. Assumes the fifo to
 + * be at least 2 bytes long.
 + *
 + * @return 0 if the request was NOT HANDLED,
 + * < 0 when error
 + * > 0 when the request is processed
 + *
 + * Context:  caller holds controller lock
 + */
 +static int
 +service_in_request(struct musb *musb,
 +              const struct usb_ctrlrequest *pControlRequest)
 +{
 +      int handled = 0;        /* not handled */
 +
 +      if ((pControlRequest->bRequestType & USB_TYPE_MASK)
 +                      == USB_TYPE_STANDARD) {
 +              switch (pControlRequest->bRequest) {
 +              case USB_REQ_GET_STATUS:
 +                      handled = service_tx_status_request(musb,
 +                                      pControlRequest);
 +                      break;
 +
 +              /* case USB_REQ_SYNC_FRAME: */
 +
 +              default:
 +                      break;
 +              }
 +      }
 +      return handled;
 +}
 +
 +/*
 + * Context:  caller holds controller lock
 + */
 +static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
 +{
 +      musb->ep0_state = MGC_END0_STAGE_SETUP;
 +      musb_g_giveback(&musb->aLocalEnd[0].ep_in, req, 0);
 +}
 +
 +/*
 + * Tries to start B-device HNP negotiation if enabled via sysfs
 + */
 +static inline void musb_try_b_hnp_enable(struct musb *musb)
 +{
 +      void __iomem    *pBase = musb->pRegs;
 +      u8              devctl;
 +
 +      DBG(1, "HNP: Setting HR\n");
 +      devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
 +      musb_writeb(pBase, MGC_O_HDRC_DEVCTL, devctl | MGC_M_DEVCTL_HR);
 +}
 +
 +/*
 + * Handle all control requests with no DATA stage, including standard
 + * requests such as:
 + * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
 + *    always delegated to the gadget driver
 + * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
 + *    always handled here, except for class/vendor/... features
 + *
 + * Context:  caller holds controller lock
 + */
 +static int
 +service_zero_data_request(struct musb *musb,
 +              struct usb_ctrlrequest *pControlRequest)
 +__releases(musb->Lock)
 +__acquires(musb->Lock)
 +{
 +      int handled = -EINVAL;
 +      void __iomem *pBase = musb->pRegs;
 +      const u8 bRecip = pControlRequest->bRequestType & USB_RECIP_MASK;
 +
 +      /* the gadget driver handles everything except what we MUST handle */
 +      if ((pControlRequest->bRequestType & USB_TYPE_MASK)
 +                      == USB_TYPE_STANDARD) {
 +              switch (pControlRequest->bRequest) {
 +              case USB_REQ_SET_ADDRESS:
 +                      /* change it after the status stage */
 +                      musb->bSetAddress = TRUE;
 +                      musb->bAddress = (u8) (pControlRequest->wValue & 0x7f);
 +                      handled = 1;
 +                      break;
 +
 +              case USB_REQ_CLEAR_FEATURE:
 +                      switch (bRecip) {
 +                      case USB_RECIP_DEVICE:
 +                              if (pControlRequest->wValue
 +                                              != USB_DEVICE_REMOTE_WAKEUP)
 +                                      break;
 +                              musb->may_wakeup = 0;
 +                              handled = 1;
 +                              break;
 +                      case USB_RECIP_INTERFACE:
 +                              break;
 +                      case USB_RECIP_ENDPOINT:{
 +                              const u8 bEnd = pControlRequest->wIndex & 0x0f;
 +                              struct musb_ep *pEnd;
 +
 +                              if (bEnd == 0
 +                                              || bEnd >= MUSB_C_NUM_EPS
 +                                              || pControlRequest->wValue
 +                                                      != USB_ENDPOINT_HALT)
 +                                      break;
 +
 +                              if (pControlRequest->wIndex & USB_DIR_IN)
 +                                      pEnd = &musb->aLocalEnd[bEnd].ep_in;
 +                              else
 +                                      pEnd = &musb->aLocalEnd[bEnd].ep_out;
 +                              if (!pEnd->desc)
 +                                      break;
 +
 +                              /* REVISIT do it directly, no locking games */
 +                              spin_unlock(&musb->Lock);
 +                              musb_gadget_set_halt(&pEnd->end_point, 0);
 +                              spin_lock(&musb->Lock);
 +
 +                              /* select ep0 again */
 +                              MGC_SelectEnd(pBase, 0);
 +                              handled = 1;
 +                              } break;
 +                      default:
 +                              /* class, vendor, etc ... delegate */
 +                              handled = 0;
 +                              break;
 +                      }
 +                      break;
 +
 +              case USB_REQ_SET_FEATURE:
 +                      switch (bRecip) {
 +                      case USB_RECIP_DEVICE:
 +                              handled = 1;
 +                              switch (pControlRequest->wValue) {
 +                              case USB_DEVICE_REMOTE_WAKEUP:
 +                                      musb->may_wakeup = 1;
 +                                      break;
 +                              case USB_DEVICE_TEST_MODE:
 +                                      if (musb->g.speed != USB_SPEED_HIGH)
 +                                              goto stall;
 +                                      if (pControlRequest->wIndex & 0xff)
 +                                              goto stall;
 +
 +                                      switch (pControlRequest->wIndex >> 8) {
 +                                      case 1:
 +                                              pr_debug("TEST_J\n");
 +                                              /* TEST_J */
 +                                              musb->bTestModeValue =
 +                                                      MGC_M_TEST_J;
 +                                              break;
 +                                      case 2:
 +                                              /* TEST_K */
 +                                              pr_debug("TEST_K\n");
 +                                              musb->bTestModeValue =
 +                                                      MGC_M_TEST_K;
 +                                              break;
 +                                      case 3:
 +                                              /* TEST_SE0_NAK */
 +                                              pr_debug("TEST_SE0_NAK\n");
 +                                              musb->bTestModeValue =
 +                                                      MGC_M_TEST_SE0_NAK;
 +                                              break;
 +                                      case 4:
 +                                              /* TEST_PACKET */
 +                                              pr_debug("TEST_PACKET\n");
 +                                              musb->bTestModeValue =
 +                                                      MGC_M_TEST_PACKET;
 +                                              break;
 +                                      default:
 +                                              goto stall;
 +                                      }
 +
 +                                      /* enter test mode after irq */
 +                                      if (handled > 0)
 +                                              musb->bTestMode = TRUE;
 +                                      break;
 +#ifdef CONFIG_USB_MUSB_OTG
 +                              case USB_DEVICE_B_HNP_ENABLE:
 +                                      if (!musb->g.is_otg)
 +                                              goto stall;
 +                                      musb->g.b_hnp_enable = 1;
 +                                      musb_try_b_hnp_enable(musb);
 +                                      break;
 +                              case USB_DEVICE_A_HNP_SUPPORT:
 +                                      if (!musb->g.is_otg)
 +                                              goto stall;
 +                                      musb->g.a_hnp_support = 1;
 +                                      break;
 +                              case USB_DEVICE_A_ALT_HNP_SUPPORT:
 +                                      if (!musb->g.is_otg)
 +                                              goto stall;
 +                                      musb->g.a_alt_hnp_support = 1;
 +                                      break;
 +#endif
 +stall:
 +                              default:
 +                                      handled = -EINVAL;
 +                                      break;
 +                              }
 +                              break;
 +
 +                      case USB_RECIP_INTERFACE:
 +                              break;
 +
 +                      case USB_RECIP_ENDPOINT:{
 +                              const u8                bEnd =
 +                                      pControlRequest->wIndex & 0x0f;
 +                              struct musb_ep          *pEnd;
 +                              struct musb_hw_ep       *ep;
 +                              void __iomem            *regs;
 +                              int                     is_in;
 +                              u16                     csr;
 +
 +                              if (bEnd == 0
 +                                              || bEnd >= MUSB_C_NUM_EPS
 +                                              || pControlRequest->wValue
 +                                                      != USB_ENDPOINT_HALT)
 +                                      break;
 +
 +                              ep = musb->aLocalEnd + bEnd;
 +                              regs = ep->regs;
 +                              is_in = pControlRequest->wIndex & USB_DIR_IN;
 +                              if (is_in)
 +                                      pEnd = &ep->ep_in;
 +                              else
 +                                      pEnd = &ep->ep_out;
 +                              if (!pEnd->desc)
 +                                      break;
 +
 +                              MGC_SelectEnd(pBase, bEnd);
 +                              if (is_in) {
 +                                      csr = musb_readw(regs,
 +                                                      MGC_O_HDRC_TXCSR);
 +                                      if (csr & MGC_M_TXCSR_FIFONOTEMPTY)
 +                                              csr |= MGC_M_TXCSR_FLUSHFIFO;
 +                                      csr |= MGC_M_TXCSR_P_SENDSTALL
 +                                              | MGC_M_TXCSR_CLRDATATOG
 +                                              | MGC_M_TXCSR_P_WZC_BITS;
 +                                      musb_writew(regs, MGC_O_HDRC_TXCSR,
 +                                                      csr);
 +                              } else {
 +                                      csr = musb_readw(regs,
 +                                                      MGC_O_HDRC_RXCSR);
 +                                      csr |= MGC_M_RXCSR_P_SENDSTALL
 +                                              | MGC_M_RXCSR_FLUSHFIFO
 +                                              | MGC_M_RXCSR_CLRDATATOG
 +                                              | MGC_M_TXCSR_P_WZC_BITS;
 +                                      musb_writew(regs, MGC_O_HDRC_RXCSR,
 +                                                      csr);
 +                              }
 +
 +                              /* select ep0 again */
 +                              MGC_SelectEnd(pBase, 0);
 +                              handled = 1;
 +                              } break;
 +
 +                      default:
 +                              /* class, vendor, etc ... delegate */
 +                              handled = 0;
 +                              break;
 +                      }
 +                      break;
 +              default:
 +                      /* delegate SET_CONFIGURATION, etc */
 +                      handled = 0;
 +              }
 +      } else
 +              handled = 0;
 +      return handled;
 +}
 +
 +/* we have an ep0out data packet
 + * Context:  caller holds controller lock
 + */
 +static void ep0_rxstate(struct musb *this)
 +{
 +      void __iomem            *regs = this->control_ep->regs;
 +      struct usb_request      *req;
 +      u16                     tmp;
 +
 +      req = next_ep0_request(this);
 +
 +      /* read packet and ack; or stall because of gadget driver bug:
 +       * should have provided the rx buffer before setup() returned.
 +       */
 +      if (req) {
 +              void            *buf = req->buf + req->actual;
 +              unsigned        len = req->length - req->actual;
 +
 +              /* read the buffer */
 +              tmp = musb_readb(regs, MGC_O_HDRC_COUNT0);
 +              if (tmp > len) {
 +                      req->status = -EOVERFLOW;
 +                      tmp = len;
 +              }
 +              musb_read_fifo(&this->aLocalEnd[0], tmp, buf);
 +              req->actual += tmp;
 +              tmp = MGC_M_CSR0_P_SVDRXPKTRDY;
 +              if (tmp < 64 || req->actual == req->length) {
 +                      this->ep0_state = MGC_END0_STAGE_STATUSIN;
 +                      tmp |= MGC_M_CSR0_P_DATAEND;
 +              } else
 +                      req = NULL;
 +      } else
 +              tmp = MGC_M_CSR0_P_SVDRXPKTRDY | MGC_M_CSR0_P_SENDSTALL;
 +      musb_writew(regs, MGC_O_HDRC_CSR0, tmp);
 +
 +
 +      /* NOTE:  we "should" hold off reporting DATAEND and going to
 +       * STATUSIN until after the completion handler decides whether
 +       * to issue a stall instead, since this hardware can do that.
 +       */
 +      if (req)
 +              musb_g_ep0_giveback(this, req);
 +}
 +
 +/*
 + * transmitting to the host (IN), this code might be called from IRQ
 + * and from kernel thread.
 + *
 + * Context:  caller holds controller lock
 + */
 +static void ep0_txstate(struct musb *musb)
 +{
 +      void __iomem            *regs = musb->control_ep->regs;
 +      struct usb_request      *pRequest = next_ep0_request(musb);
 +      u16                     wCsrVal = MGC_M_CSR0_TXPKTRDY;
 +      u8                      *pFifoSource;
 +      u8                      wFifoCount;
 +
 +      if (!pRequest) {
 +              // WARN_ON(1);
 +              DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MGC_O_HDRC_CSR0));
 +              return;
 +      }
 +
 +      /* load the data */
 +      pFifoSource = (u8 *) pRequest->buf + pRequest->actual;
 +      wFifoCount = min((unsigned) MGC_END0_FIFOSIZE,
 +              pRequest->length - pRequest->actual);
 +      musb_write_fifo(&musb->aLocalEnd[0], wFifoCount, pFifoSource);
 +      pRequest->actual += wFifoCount;
 +
 +      /* update the flags */
 +      if (wFifoCount < MUSB_MAX_END0_PACKET
 +                      || pRequest->actual == pRequest->length) {
 +              musb->ep0_state = MGC_END0_STAGE_STATUSOUT;
 +              wCsrVal |= MGC_M_CSR0_P_DATAEND;
 +      } else
 +              pRequest = NULL;
 +
 +      /* send it out, triggering a "txpktrdy cleared" irq */
 +      musb_writew(regs, MGC_O_HDRC_CSR0, wCsrVal);
 +
 +      /* report completions as soon as the fifo's loaded; there's no
 +       * win in waiting till this last packet gets acked.  (other than
 +       * very precise fault reporting, needed by USB TMC; possible with
 +       * this hardware, but not usable from portable gadget drivers.)
 +       */
 +      if (pRequest)
 +              musb_g_ep0_giveback(musb, pRequest);
 +}
 +
 +/*
 + * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 + * Fields are left in USB byte-order.
 + *
 + * Context:  caller holds controller lock.
 + */
 +static void
 +musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
 +{
 +      struct usb_request      *r;
 +      void __iomem            *regs = musb->control_ep->regs;
 +
 +      musb_read_fifo(&musb->aLocalEnd[0], sizeof *req, (u8 *)req);
 +
 +      /* NOTE:  earlier 2.6 versions changed setup packets to host
 +       * order, but now USB packets always stay in USB byte order.
 +       */
 +      DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
 +              req->bRequestType,
 +              req->bRequest,
 +              le16_to_cpu(req->wValue),
 +              le16_to_cpu(req->wIndex),
 +              le16_to_cpu(req->wLength));
 +
 +      /* clean up any leftover transfers */
 +      r = next_ep0_request(musb);
 +      if (r)
 +              musb_g_ep0_giveback(musb, r);
 +
 +      /* For zero-data requests we want to delay the STATUS stage to
 +       * avoid SETUPEND errors.  If we read data (OUT), delay accepting
 +       * packets until there's a buffer to store them in.
 +       *
 +       * If we write data, the controller acts happier if we enable
 +       * the TX FIFO right away, and give the controller a moment
 +       * to switch modes...
 +       */
 +      musb->bSetAddress = FALSE;
 +      musb->ackpend = MGC_M_CSR0_P_SVDRXPKTRDY;
 +      if (req->wLength == 0) {
 +              if (req->bRequestType & USB_DIR_IN)
 +                      musb->ackpend |= MGC_M_CSR0_TXPKTRDY;
 +              musb->ep0_state = MGC_END0_STAGE_ACKWAIT;
 +      } else if (req->bRequestType & USB_DIR_IN) {
 +              musb->ep0_state = MGC_END0_STAGE_TX;
 +              musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_P_SVDRXPKTRDY);
 +              while ((musb_readw(regs, MGC_O_HDRC_CSR0)
 +                              & MGC_M_CSR0_RXPKTRDY) != 0)
 +                      cpu_relax();
 +              musb->ackpend = 0;
 +      } else
 +              musb->ep0_state = MGC_END0_STAGE_RX;
 +}
 +
 +static int
 +forward_to_driver(struct musb *musb,
 +              const struct usb_ctrlrequest *pControlRequest)
 +__releases(musb->Lock)
 +__acquires(musb->Lock)
 +{
 +      int retval;
 +      if (!musb->pGadgetDriver)
 +              return -EOPNOTSUPP;
 +      spin_unlock(&musb->Lock);
 +      retval = musb->pGadgetDriver->setup(&musb->g, pControlRequest);
 +      spin_lock(&musb->Lock);
 +      return retval;
 +}
 +
 +/*
 + * Handle peripheral ep0 interrupt
 + *
 + * Context: irq handler; we won't re-enter the driver that way.
 + */
 +irqreturn_t musb_g_ep0_irq(struct musb *musb)
 +{
 +      u16             wCsrVal;
 +      u16             wCount;
 +      void __iomem    *pBase = musb->pRegs;
 +      void __iomem    *regs = musb->aLocalEnd[0].regs;
 +      irqreturn_t     retval = IRQ_NONE;
 +
 +      MGC_SelectEnd(pBase, 0);        /* select ep0 */
 +      wCsrVal = musb_readw(regs, MGC_O_HDRC_CSR0);
 +      wCount = musb_readb(regs, MGC_O_HDRC_COUNT0);
 +
 +      DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
 +                      wCsrVal, wCount,
 +                      musb_readb(pBase, MGC_O_HDRC_FADDR),
 +                      decode_ep0stage(musb->ep0_state));
 +
 +      /* I sent a stall.. need to acknowledge it now.. */
 +      if (wCsrVal & MGC_M_CSR0_P_SENTSTALL) {
 +              musb_writew(regs, MGC_O_HDRC_CSR0,
 +                              wCsrVal & ~MGC_M_CSR0_P_SENTSTALL);
 +              retval = IRQ_HANDLED;
 +              musb->ep0_state = MGC_END0_STAGE_SETUP;
 +              wCsrVal = musb_readw(regs, MGC_O_HDRC_CSR0);
 +      }
 +
 +      /* request ended "early" */
 +      if (wCsrVal & MGC_M_CSR0_P_SETUPEND) {
 +              musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_P_SVDSETUPEND);
 +              retval = IRQ_HANDLED;
 +              musb->ep0_state = MGC_END0_STAGE_SETUP;
 +              wCsrVal = musb_readw(regs, MGC_O_HDRC_CSR0);
 +              /* NOTE:  request may need completion */
 +      }
 +
 +      /* docs from Mentor only describe tx, rx, and idle/setup states.
 +       * we need to handle nuances around status stages, and also the
 +       * case where status and setup stages come back-to-back ...
 +       */
 +      switch (musb->ep0_state) {
 +
 +      case MGC_END0_STAGE_TX:
 +              /* irq on clearing txpktrdy */
 +              if ((wCsrVal & MGC_M_CSR0_TXPKTRDY) == 0) {
 +                      ep0_txstate(musb);
 +                      retval = IRQ_HANDLED;
 +              }
 +              break;
 +
 +      case MGC_END0_STAGE_RX:
 +              /* irq on set rxpktrdy */
 +              if (wCsrVal & MGC_M_CSR0_RXPKTRDY) {
 +                      ep0_rxstate(musb);
 +                      retval = IRQ_HANDLED;
 +              }
 +              break;
 +
 +      case MGC_END0_STAGE_STATUSIN:
 +              /* end of sequence #2 (OUT/RX state) or #3 (no data) */
 +
 +              /* update address (if needed) only @ the end of the
 +               * status phase per usb spec, which also guarantees
 +               * we get 10 msec to receive this irq... until this
 +               * is done we won't see the next packet.
 +               */
 +              if (musb->bSetAddress) {
 +                      musb->bSetAddress = FALSE;
 +                      musb_writeb(pBase, MGC_O_HDRC_FADDR, musb->bAddress);
 +              }
 +
 +              /* enter test mode if needed (exit by reset) */
 +              else if (musb->bTestMode) {
 +                      DBG(1, "entering TESTMODE\n");
 +
 +                      if (MGC_M_TEST_PACKET == musb->bTestModeValue)
 +                              musb_load_testpacket(musb);
 +
 +                      musb_writeb(pBase, MGC_O_HDRC_TESTMODE,
 +                                      musb->bTestModeValue);
 +              }
 +              /* FALLTHROUGH */
 +
 +      case MGC_END0_STAGE_STATUSOUT:
 +              /* end of sequence #1: write to host (TX state) */
 +              {
 +                      struct usb_request      *req;
 +
 +                      req = next_ep0_request(musb);
 +                      if (req)
 +                              musb_g_ep0_giveback(musb, req);
 +              }
 +              retval = IRQ_HANDLED;
 +              musb->ep0_state = MGC_END0_STAGE_SETUP;
 +              /* FALLTHROUGH */
 +
 +      case MGC_END0_STAGE_SETUP:
 +              if (wCsrVal & MGC_M_CSR0_RXPKTRDY) {
 +                      struct usb_ctrlrequest  setup;
 +                      int                     handled = 0;
 +
 +                      if (wCount != 8) {
 +                              ERR("SETUP packet len %d != 8 ?\n", wCount);
 +                              break;
 +                      }
 +                      musb_read_setup(musb, &setup);
 +                      retval = IRQ_HANDLED;
 +
 +                      /* sometimes the RESET won't be reported */
 +                      if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
 +                              u8      power;
 +
 +                              printk(KERN_NOTICE "%s: peripheral reset "
 +                                              "irq lost!\n",
 +                                              musb_driver_name);
 +                              power = musb_readb(pBase, MGC_O_HDRC_POWER);
 +                              musb->g.speed = (power & MGC_M_POWER_HSMODE)
 +                                      ? USB_SPEED_HIGH : USB_SPEED_FULL;
 +
 +                      }
 +
 +                      switch (musb->ep0_state) {
 +
 +                      /* sequence #3 (no data stage), includes requests
 +                       * we can't forward (notably SET_ADDRESS and the
 +                       * device/endpoint feature set/clear operations)
 +                       * plus SET_CONFIGURATION and others we must
 +                       */
 +                      case MGC_END0_STAGE_ACKWAIT:
 +                              handled = service_zero_data_request(
 +                                              musb, &setup);
 +
 +                              /* status stage might be immediate */
 +                              if (handled > 0) {
 +                                      musb->ackpend |= MGC_M_CSR0_P_DATAEND;
 +                                      musb->ep0_state =
 +                                              MGC_END0_STAGE_STATUSIN;
 +                              }
 +                              break;
 +
 +                      /* sequence #1 (IN to host), includes GET_STATUS
 +                       * requests that we can't forward, GET_DESCRIPTOR
 +                       * and others that we must
 +                       */
 +                      case MGC_END0_STAGE_TX:
 +                              handled = service_in_request(musb, &setup);
 +                              if (handled > 0) {
 +                                      musb->ackpend = MGC_M_CSR0_TXPKTRDY
 +                                              | MGC_M_CSR0_P_DATAEND;
 +                                      musb->ep0_state =
 +                                              MGC_END0_STAGE_STATUSOUT;
 +                              }
 +                              break;
 +
 +                      /* sequence #2 (OUT from host), always forward */
 +                      default:                /* MGC_END0_STAGE_RX */
 +                              break;
 +                      }
 +
 +                      DBG(3, "handled %d, csr %04x, ep0stage %s\n",
 +                              handled, wCsrVal,
 +                              decode_ep0stage(musb->ep0_state));
 +
 +                      /* unless we need to delegate this to the gadget
 +                       * driver, we know how to wrap this up:  csr0 has
 +                       * not yet been written.
 +                       */
 +                      if (handled < 0)
 +                              goto stall;
 +                      else if (handled > 0)
 +                              goto finish;
 +
 +                      handled = forward_to_driver(musb, &setup);
 +                      if (handled < 0) {
 +                              MGC_SelectEnd(pBase, 0);
 +stall:
 +                              DBG(3, "stall (%d)\n", handled);
 +                              musb->ackpend |= MGC_M_CSR0_P_SENDSTALL;
 +                              musb->ep0_state = MGC_END0_STAGE_SETUP;
 +finish:
 +                              musb_writew(regs, MGC_O_HDRC_CSR0,
 +                                              musb->ackpend);
 +                              musb->ackpend = 0;
 +                      }
 +              }
 +              break;
 +
 +      case MGC_END0_STAGE_ACKWAIT:
 +              /* This should not happen. But happens with tusb6010 with
 +               * g_file_storage and high speed. Do nothing.
 +               */
 +              retval = IRQ_HANDLED;
 +              break;
 +
 +      default:
 +              /* "can't happen" */
 +              WARN_ON(1);
 +              musb_writew(regs, MGC_O_HDRC_CSR0, MGC_M_CSR0_P_SENDSTALL);
 +              musb->ep0_state = MGC_END0_STAGE_SETUP;
 +              break;
 +      }
 +
 +      return retval;
 +}
 +
 +
 +static int
 +musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
 +{
 +      /* always enabled */
 +      return -EINVAL;
 +}
 +
 +static int musb_g_ep0_disable(struct usb_ep *e)
 +{
 +      /* always enabled */
 +      return -EINVAL;
 +}
 +
-       .alloc_buffer   = musb_g_ep0_alloc_buffer,
-       .free_buffer    = musb_g_ep0_free_buffer,
 +static int
 +musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
 +{
 +      struct musb_ep          *ep;
 +      struct musb_request     *req;
 +      struct musb             *musb;
 +      int                     status;
 +      unsigned long           lockflags;
 +      void __iomem            *regs;
 +
 +      if (!e || !r)
 +              return -EINVAL;
 +
 +      ep = to_musb_ep(e);
 +      musb = ep->pThis;
 +      regs = musb->control_ep->regs;
 +
 +      req = to_musb_request(r);
 +      req->musb = musb;
 +      req->request.actual = 0;
 +      req->request.status = -EINPROGRESS;
 +      req->bTx = ep->is_in;
 +
 +      spin_lock_irqsave(&musb->Lock, lockflags);
 +
 +      if (!list_empty(&ep->req_list)) {
 +              status = -EBUSY;
 +              goto cleanup;
 +      }
 +
 +      switch (musb->ep0_state) {
 +      case MGC_END0_STAGE_RX:         /* control-OUT data */
 +      case MGC_END0_STAGE_TX:         /* control-IN data */
 +      case MGC_END0_STAGE_ACKWAIT:    /* zero-length data */
 +              status = 0;
 +              break;
 +      default:
 +              DBG(1, "ep0 request queued in state %d\n",
 +                              musb->ep0_state);
 +              status = -EINVAL;
 +              goto cleanup;
 +      }
 +
 +      /* add request to the list */
 +      list_add_tail(&(req->request.list), &(ep->req_list));
 +
 +      DBG(3, "queue to %s (%s), length=%d\n",
 +                      ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
 +                      req->request.length);
 +
 +      MGC_SelectEnd(musb->pRegs, 0);
 +
 +      /* sequence #1, IN ... start writing the data */
 +      if (musb->ep0_state == MGC_END0_STAGE_TX)
 +              ep0_txstate(musb);
 +
 +      /* sequence #3, no-data ... issue IN status */
 +      else if (musb->ep0_state == MGC_END0_STAGE_ACKWAIT) {
 +              if (req->request.length)
 +                      status = -EINVAL;
 +              else {
 +                      musb->ep0_state = MGC_END0_STAGE_STATUSIN;
 +                      musb_writew(regs, MGC_O_HDRC_CSR0,
 +                                      musb->ackpend | MGC_M_CSR0_P_DATAEND);
 +                      musb->ackpend = 0;
 +                      musb_g_ep0_giveback(ep->pThis, r);
 +              }
 +
 +      /* else for sequence #2 (OUT), caller provides a buffer
 +       * before the next packet arrives.  deferred responses
 +       * (after SETUP is acked) are racey.
 +       */
 +      } else if (musb->ackpend) {
 +              musb_writew(regs, MGC_O_HDRC_CSR0, musb->ackpend);
 +              musb->ackpend = 0;
 +      }
 +
 +cleanup:
 +      spin_unlock_irqrestore(&musb->Lock, lockflags);
 +      return status;
 +}
 +
 +static int
 +musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
 +{
 +      /* we just won't support this */
 +      return -EINVAL;
 +}
 +
 +static int musb_g_ep0_halt(struct usb_ep *e, int value)
 +{
 +      struct musb_ep          *ep;
 +      struct musb             *musb;
 +      void __iomem            *base, *regs;
 +      unsigned long           flags;
 +      int                     status;
 +      u16                     csr;
 +
 +      if (!e || !value)
 +              return -EINVAL;
 +
 +      ep = to_musb_ep(e);
 +      musb = ep->pThis;
 +      base = musb->pRegs;
 +      regs = musb->control_ep->regs;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +      if (!list_empty(&ep->req_list)) {
 +              status = -EBUSY;
 +              goto cleanup;
 +      }
 +
 +      switch (musb->ep0_state) {
 +      case MGC_END0_STAGE_TX:         /* control-IN data */
 +      case MGC_END0_STAGE_ACKWAIT:    /* STALL for zero-length data */
 +      case MGC_END0_STAGE_RX:         /* control-OUT data */
 +              status = 0;
 +
 +              MGC_SelectEnd(base, 0);
 +              csr = musb_readw(regs, MGC_O_HDRC_CSR0);
 +              csr |= MGC_M_CSR0_P_SENDSTALL;
 +              musb_writew(regs, MGC_O_HDRC_CSR0, csr);
 +              musb->ep0_state = MGC_END0_STAGE_SETUP;
 +              break;
 +      default:
 +              DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
 +              status = -EINVAL;
 +      }
 +
 +cleanup:
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +      return status;
 +}
 +
 +const struct usb_ep_ops musb_g_ep0_ops = {
 +      .enable         = musb_g_ep0_enable,
 +      .disable        = musb_g_ep0_disable,
 +      .alloc_request  = musb_alloc_request,
 +      .free_request   = musb_free_request,
 +      .queue          = musb_g_ep0_queue,
 +      .dequeue        = musb_g_ep0_dequeue,
 +      .set_halt       = musb_g_ep0_halt,
 +      .fifo_status    = NULL,
 +      .fifo_flush     = NULL,
 +};
index 1b2c829,0000000..2c74b47
mode 100644,000000..100644
--- /dev/null
@@@ -1,2091 -1,0 +1,2036 @@@
- /*
-  * dma-coherent memory allocation (for dma-capable endpoints)
-  *
-  * NOTE: the dma_*_coherent() API calls suck; most implementations are
-  * (a) page-oriented, so small buffers lose big, and (b) asymmetric with
-  * respect to calls with irqs disabled:  alloc is safe, free is not.
-  */
- static void *musb_gadget_alloc_buffer(struct usb_ep *ep, unsigned bytes,
-                       dma_addr_t * dma, gfp_t gfp_flags)
- {
-       struct musb_ep *musb_ep = to_musb_ep(ep);
-       return dma_alloc_coherent(musb_ep->pThis->controller,
-                       bytes, dma, gfp_flags);
- }
- static DEFINE_SPINLOCK(buflock);
 +/******************************************************************
 + * Copyright 2005 Mentor Graphics Corporation
 + * Copyright (C) 2005-2006 by Texas Instruments
 + *
 + * This file is part of the Inventra Controller Driver for Linux.
 + *
 + * The Inventra Controller Driver for Linux is free software; you
 + * can redistribute it and/or modify it under the terms of the GNU
 + * General Public License version 2 as published by the Free Software
 + * Foundation.
 + *
 + * The Inventra Controller Driver for Linux is distributed in
 + * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 + * without even the implied warranty of MERCHANTABILITY or
 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 + * License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with The Inventra Controller Driver for Linux ; if not,
 + * write to the Free Software Foundation, Inc., 59 Temple Place,
 + * Suite 330, Boston, MA  02111-1307  USA
 + *
 + * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
 + * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
 + * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
 + * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
 + * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
 + * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
 + * NON-INFRINGEMENT.  MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
 + * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
 + * GRAPHICS SUPPORT CUSTOMER.
 + ******************************************************************/
 +
 +#include <linux/kernel.h>
 +#include <linux/list.h>
 +#include <linux/timer.h>
 +#include <linux/module.h>
 +#include <linux/smp.h>
 +#include <linux/spinlock.h>
 +#include <linux/delay.h>
 +#include <linux/moduleparam.h>
 +#include <linux/stat.h>
 +#include <linux/dma-mapping.h>
 +
 +#include "musbdefs.h"
 +
 +
 +/* MUSB PERIPHERAL status 3-mar:
 + *
 + * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 + *   Minor glitches:
 + *
 + *     + remote wakeup to Linux hosts work, but saw USBCV failures;
 + *       in one test run (operator error?)
 + *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 + *       to break when dma is enabled ... is something wrongly
 + *       clearing SENDSTALL?
 + *
 + * - Mass storage behaved ok when last tested.  Network traffic patterns
 + *   (with lots of short transfers etc) need retesting; they turn up the
 + *   worst cases of the DMA, since short packets are typical but are not
 + *   required.
 + *
 + * - TX/IN
 + *     + both pio and dma behave in with network and g_zero tests
 + *     + no cppi throughput issues other than no-hw-queueing
 + *     + failed with FLAT_REG (DaVinci)
 + *     + seems to behave with double buffering, PIO -and- CPPI
 + *     + with gadgetfs + AIO, requests got lost?
 + *
 + * - RX/OUT
 + *     + both pio and dma behave in with network and g_zero tests
 + *     + dma is slow in typical case (short_not_ok is clear)
 + *     + double buffering ok with PIO
 + *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 + *     + request lossage observed with gadgetfs
 + *
 + * - ISO not tested ... might work, but only weakly isochronous
 + *
 + * - Gadget driver disabling of softconnect during bind() is ignored; so
 + *   drivers can't hold off host requests until userspace is ready.
 + *   (Workaround:  they can turn it off later.)
 + *
 + * - PORTABILITY (assumes PIO works):
 + *     + DaVinci, basically works with cppi dma
 + *     + OMAP 2430, ditto with mentor dma
 + *     + TUSB 6010, platform-specific dma in the works
 + */
 +
 +/**************************************************************************
 +Handling completion
 +**************************************************************************/
 +
 +/*
 + * Immediately complete a request.
 + *
 + * @param pRequest the request to complete
 + * @param status the status to complete the request with
 + * Context: controller locked, IRQs blocked.
 + */
 +void musb_g_giveback(
 +      struct musb_ep          *ep,
 +      struct usb_request      *pRequest,
 +      int status)
 +__releases(ep->musb->Lock)
 +__acquires(ep->musb->Lock)
 +{
 +      struct musb_request     *req;
 +      struct musb             *musb;
 +      int                     busy = ep->busy;
 +
 +      req = to_musb_request(pRequest);
 +
 +      list_del(&pRequest->list);
 +      if (req->request.status == -EINPROGRESS)
 +              req->request.status = status;
 +      musb = req->musb;
 +
 +      ep->busy = 1;
 +      spin_unlock(&musb->Lock);
 +      if (is_dma_capable()) {
 +              if (req->mapped) {
 +                      dma_unmap_single(musb->controller,
 +                                      req->request.dma,
 +                                      req->request.length,
 +                                      req->bTx
 +                                              ? DMA_TO_DEVICE
 +                                              : DMA_FROM_DEVICE);
 +                      req->request.dma = DMA_ADDR_INVALID;
 +                      req->mapped = 0;
 +              } else if (req->request.dma != DMA_ADDR_INVALID)
 +                      dma_sync_single_for_cpu(musb->controller,
 +                                      req->request.dma,
 +                                      req->request.length,
 +                                      req->bTx
 +                                              ? DMA_TO_DEVICE
 +                                              : DMA_FROM_DEVICE);
 +      }
 +      if (pRequest->status == 0)
 +              DBG(5, "%s done request %p,  %d/%d\n",
 +                              ep->end_point.name, pRequest,
 +                              req->request.actual, req->request.length);
 +      else
 +              DBG(2, "%s request %p, %d/%d fault %d\n",
 +                              ep->end_point.name, pRequest,
 +                              req->request.actual, req->request.length,
 +                              pRequest->status);
 +      req->request.complete(&req->ep->end_point, &req->request);
 +      spin_lock(&musb->Lock);
 +      ep->busy = busy;
 +}
 +
 +/* ----------------------------------------------------------------------- */
 +
 +/*
 + * Abort requests queued to an endpoint using the status. Synchronous.
 + * caller locked controller and blocked irqs, and selected this ep.
 + */
 +static void nuke(struct musb_ep *ep, const int status)
 +{
 +      struct musb_request     *req = NULL;
 +      void __iomem *epio = ep->pThis->aLocalEnd[ep->bEndNumber].regs;
 +
 +      /* completions below may drop/retake the lock; flag the ep busy
 +       * meanwhile (musb_g_giveback saves and restores this flag)
 +       */
 +      ep->busy = 1;
 +
 +      if (is_dma_capable() && ep->dma) {
 +              struct dma_controller   *c = ep->pThis->pDmaController;
 +              int value;
 +              /* flush the fifo in the transfer direction; written twice,
 +               * presumably to cover double-buffered fifos -- TODO confirm
 +               */
 +              if (ep->is_in) {
 +                      musb_writew(epio, MGC_O_HDRC_TXCSR,
 +                                      0 | MGC_M_TXCSR_FLUSHFIFO);
 +                      musb_writew(epio, MGC_O_HDRC_TXCSR,
 +                                      0 | MGC_M_TXCSR_FLUSHFIFO);
 +              } else {
 +                      musb_writew(epio, MGC_O_HDRC_RXCSR,
 +                                      0 | MGC_M_RXCSR_FLUSHFIFO);
 +                      musb_writew(epio, MGC_O_HDRC_RXCSR,
 +                                      0 | MGC_M_RXCSR_FLUSHFIFO);
 +              }
 +
 +              /* abort any in-flight transfer, then free the channel */
 +              value = c->channel_abort(ep->dma);
 +              DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
 +              c->channel_release(ep->dma);
 +              ep->dma = NULL;
 +      }
 +
 +      /* each giveback must unlink its request from req_list, or this
 +       * loop never terminates
 +       */
 +      while (!list_empty(&(ep->req_list))) {
 +              req = container_of(ep->req_list.next, struct musb_request,
 +                              request.list);
 +              musb_g_giveback(ep, &req->request, status);
 +      }
 +}
 +
 +/**************************************************************************
 + * TX/IN and RX/OUT Data transfers
 + **************************************************************************/
 +
 +/*
 + * This assumes the separate CPPI engine is responding to DMA requests
 + * from the usb core ... sequenced a bit differently from mentor dma.
 + */
 +
 +static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
 +{
 +      if (can_bulk_split(musb, ep->type))
 +              return ep->hw_ep->wMaxPacketSizeTx;
 +      else
 +              return ep->wPacketSize;
 +}
 +
 +
 +#ifdef CONFIG_USB_INVENTRA_DMA
 +
 +/* Peripheral tx (IN) using Mentor DMA works as follows:
 +      Only mode 0 is used for transfers <= wPktSize,
 +      mode 1 is used for larger transfers,
 +
 +      One of the following happens:
 +      - Host sends IN token which causes an endpoint interrupt
 +              -> TxAvail
 +                      -> if DMA is currently busy, exit.
 +                      -> if queue is non-empty, txstate().
 +
 +      - Request is queued by the gadget driver.
 +              -> if queue was previously empty, txstate()
 +
 +      txstate()
 +              -> start
 +                /\    -> setup DMA
 +                |     (data is transferred to the FIFO, then sent out when
 +                |     IN token(s) are recd from Host.
 +                |             -> DMA interrupt on completion
 +                |                calls TxAvail.
 +                |                   -> stop DMA, ~DmaEenab,
 +                |                   -> set TxPktRdy for last short pkt or zlp
 +                |                   -> Complete Request
 +                |                   -> Continue next request (call txstate)
 +                |___________________________________|
 +
 + * Non-Mentor DMA engines can of course work differently, such as by
 + * upleveling from irq-per-packet to irq-per-buffer.
 + */
 +
 +#endif
 +
 +/*
 + * An endpoint is transmitting data. This can be called either from
 + * the IRQ routine or from ep.queue() to kickstart a request on an
 + * endpoint.
 + *
 + * Context: controller locked, IRQs blocked, endpoint selected
 + */
 +/* Load the next chunk of a TX/IN request into the fifo (PIO) or hand it
 + * to DMA; bails out early if the hardware isn't ready for more data.
 + */
 +static void txstate(struct musb *musb, struct musb_request *req)
 +{
 +      u8                      bEnd = req->bEnd;
 +      struct musb_ep          *pEnd;
 +      void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
 +      struct usb_request      *pRequest;
 +      u16                     wFifoCount = 0, wCsrVal;
 +      int                     use_dma = 0;
 +
 +      pEnd = req->ep;
 +
 +      /* we shouldn't get here while DMA is active ... but we do ... */
 +      if (dma_channel_status(pEnd->dma) == MGC_DMA_STATUS_BUSY) {
 +              DBG(4, "dma pending...\n");
 +              return;
 +      }
 +
 +      /* read TXCSR before */
 +      wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +
 +      pRequest = &req->request;
 +      /* at most one fifo-load's worth of the remaining bytes */
 +      wFifoCount = min(max_ep_writesize(musb, pEnd),
 +                      (int)(pRequest->length - pRequest->actual));
 +
 +      if (wCsrVal & MGC_M_TXCSR_TXPKTRDY) {
 +              DBG(5, "%s old packet still ready , txcsr %03x\n",
 +                              pEnd->end_point.name, wCsrVal);
 +              return;
 +      }
 +
 +      if (wCsrVal & MGC_M_TXCSR_P_SENDSTALL) {
 +              DBG(5, "%s stalling, txcsr %03x\n",
 +                              pEnd->end_point.name, wCsrVal);
 +              return;
 +      }
 +
 +      DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
 +                      bEnd, pEnd->wPacketSize, wFifoCount,
 +                      wCsrVal);
 +
 +#ifndef       CONFIG_USB_INVENTRA_FIFO
 +      if (is_dma_capable() && pEnd->dma) {
 +              struct dma_controller   *c = musb->pDmaController;
 +
 +              /* DMA is only an option when the request carries a mapped
 +               * DMA address
 +               */
 +              use_dma = (pRequest->dma != DMA_ADDR_INVALID);
 +
 +              /* MGC_M_TXCSR_P_ISO is still set correctly */
 +
 +#ifdef CONFIG_USB_INVENTRA_DMA
 +              {
 +                      size_t request_size;
 +
 +                      /* setup DMA, then program endpoint CSR */
 +                      request_size = min(pRequest->length,
 +                                              pEnd->dma->dwMaxLength);
 +                      /* mode 0 for transfers <= one packet, mode 1 for
 +                       * larger ones (see the flow comment above)
 +                       */
 +                      if (request_size <= pEnd->wPacketSize)
 +                              pEnd->dma->bDesiredMode = 0;
 +                      else
 +                              pEnd->dma->bDesiredMode = 1;
 +
 +                      use_dma = use_dma && c->channel_program(
 +                                      pEnd->dma, pEnd->wPacketSize,
 +                                      pEnd->dma->bDesiredMode,
 +                                      pRequest->dma, request_size);
 +                      if (use_dma) {
 +                              if (pEnd->dma->bDesiredMode == 0) {
 +                                      /* ASSERT: DMAENAB is clear */
 +                                      wCsrVal &= ~(MGC_M_TXCSR_AUTOSET |
 +                                                      MGC_M_TXCSR_DMAMODE);
 +                                      wCsrVal |= (MGC_M_TXCSR_DMAENAB |
 +                                                      MGC_M_TXCSR_MODE);
 +                                      // against programming guide
 +                              }
 +                              else
 +                                      wCsrVal |= (MGC_M_TXCSR_AUTOSET
 +                                                      | MGC_M_TXCSR_DMAENAB
 +                                                      | MGC_M_TXCSR_DMAMODE
 +                                                      | MGC_M_TXCSR_MODE);
 +
 +                              wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
 +                              musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
 +                      }
 +              }
 +
 +#elif defined(CONFIG_USB_TI_CPPI_DMA)
 +              /* program endpoint CSR first, then setup DMA */
 +              wCsrVal &= ~(MGC_M_TXCSR_AUTOSET
 +                              | MGC_M_TXCSR_DMAMODE
 +                              | MGC_M_TXCSR_P_UNDERRUN
 +                              | MGC_M_TXCSR_TXPKTRDY);
 +              wCsrVal |= MGC_M_TXCSR_MODE | MGC_M_TXCSR_DMAENAB;
 +              musb_writew(epio, MGC_O_HDRC_TXCSR,
 +                      (MGC_M_TXCSR_P_WZC_BITS & ~MGC_M_TXCSR_P_UNDERRUN)
 +                              | wCsrVal);
 +
 +              /* ensure writebuffer is empty */
 +              wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +
 +              /* NOTE host side sets DMAENAB later than this; both are
 +               * OK since the transfer dma glue (between CPPI and Mentor
 +               * fifos) just tells CPPI it could start.  Data only moves
 +               * to the USB TX fifo when both fifos are ready.
 +               */
 +
 +              /* "mode" is irrelevant here; handle terminating ZLPs like
 +               * PIO does, since the hardware RNDIS mode seems unreliable
 +               * except for the last-packet-is-already-short case.
 +               */
 +              use_dma = use_dma && c->channel_program(
 +                              pEnd->dma, pEnd->wPacketSize,
 +                              0,
 +                              pRequest->dma,
 +                              pRequest->length);
 +              if (!use_dma) {
 +                      c->channel_release(pEnd->dma);
 +                      pEnd->dma = NULL;
 +                      /* ASSERT: DMAENAB clear */
 +                      wCsrVal &= ~(MGC_M_TXCSR_DMAMODE | MGC_M_TXCSR_MODE);
 +                      /* invariant: prequest->buf is non-null */
 +              }
 +#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
 +              use_dma = use_dma && c->channel_program(
 +                              pEnd->dma, pEnd->wPacketSize,
 +                              pRequest->zero,
 +                              pRequest->dma,
 +                              pRequest->length);
 +#endif
 +      }
 +#endif
 +
 +      /* PIO fallback: load the fifo and set TXPKTRDY by hand */
 +      if (!use_dma) {
 +              musb_write_fifo(pEnd->hw_ep, wFifoCount,
 +                              (u8 *) (pRequest->buf + pRequest->actual));
 +              pRequest->actual += wFifoCount;
 +              wCsrVal |= MGC_M_TXCSR_TXPKTRDY;
 +              wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
 +              musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
 +      }
 +
 +      /* host may already have the data when this message shows... */
 +      DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
 +                      pEnd->end_point.name, use_dma ? "dma" : "pio",
 +                      pRequest->actual, pRequest->length,
 +                      musb_readw(epio, MGC_O_HDRC_TXCSR),
 +                      wFifoCount,
 +                      musb_readw(epio, MGC_O_HDRC_TXMAXP));
 +}
 +
 +/*
 + * FIFO state update (e.g. data ready).
 + * Called from IRQ,  with controller locked.
 + */
 +void musb_g_tx(struct musb *musb, u8 bEnd)
 +{
 +      u16                     wCsrVal;
 +      struct usb_request      *pRequest;
 +      u8 __iomem              *pBase = musb->pRegs;
 +      struct musb_ep          *pEnd = &musb->aLocalEnd[bEnd].ep_in;
 +      void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
 +      struct dma_channel      *dma;
 +
 +      MGC_SelectEnd(pBase, bEnd);
 +      /* may be NULL when the queue is empty */
 +      pRequest = next_request(pEnd);
 +
 +      wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +      DBG(4, "<== %s, txcsr %04x\n", pEnd->end_point.name, wCsrVal);
 +
 +      dma = is_dma_capable() ? pEnd->dma : NULL;
 +      /* single-pass loop; "break" jumps to the common exit */
 +      do {
 +              /* REVISIT for high bandwidth, MGC_M_TXCSR_P_INCOMPTX
 +               * probably rates reporting as a host error
 +               */
 +              if (wCsrVal & MGC_M_TXCSR_P_SENTSTALL) {
 +                      /* stall was delivered: ack it, abort any DMA, and
 +                       * fail the current request with -EPIPE
 +                       */
 +                      wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
 +                      wCsrVal &= ~MGC_M_TXCSR_P_SENTSTALL;
 +                      musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
 +                      if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
 +                              dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
 +                              musb->pDmaController->channel_abort(dma);
 +                      }
 +
 +                      if (pRequest)
 +                              musb_g_giveback(pEnd, pRequest, -EPIPE);
 +
 +                      break;
 +              }
 +
 +              if (wCsrVal & MGC_M_TXCSR_P_UNDERRUN) {
 +                      /* we NAKed, no big deal ... little reason to care */
 +                      wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
 +                      wCsrVal &= ~(MGC_M_TXCSR_P_UNDERRUN
 +                                      | MGC_M_TXCSR_TXPKTRDY);
 +                      musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
 +                      DBG(20, "underrun on ep%d, req %p\n", bEnd, pRequest);
 +              }
 +
 +              if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
 +                      /* SHOULD NOT HAPPEN ... has with cppi though, after
 +                       * changing SENDSTALL (and other cases); harmless?
 +                       */
 +                      DBG(5, "%s dma still busy?\n", pEnd->end_point.name);
 +                      break;
 +              }
 +
 +              if (pRequest) {
 +                      u8      is_dma = 0;
 +
 +                      if (dma && (wCsrVal & MGC_M_TXCSR_DMAENAB)) {
 +                              /* a DMA transfer finished: disable DMA bits
 +                               * and account the bytes it moved
 +                               */
 +                              is_dma = 1;
 +                              wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
 +                              wCsrVal &= ~(MGC_M_TXCSR_DMAENAB
 +                                              | MGC_M_TXCSR_P_UNDERRUN
 +                                              | MGC_M_TXCSR_TXPKTRDY);
 +                              musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
 +                              /* ensure writebuffer is empty */
 +                              wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +                              pRequest->actual += pEnd->dma->dwActualLength;
 +                              DBG(4, "TXCSR%d %04x, dma off, "
 +                                              "len %Zd, req %p\n",
 +                                      bEnd, wCsrVal,
 +                                      pEnd->dma->dwActualLength,
 +                                      pRequest);
 +                      }
 +
 +                      if (is_dma || pRequest->actual == pRequest->length) {
 +
 +                              /* First, maybe a terminating short packet.
 +                               * Some DMA engines might handle this by
 +                               * themselves.
 +                               */
 +                              if ((pRequest->zero
 +                                              && pRequest->length
 +                                              && (pRequest->length
 +                                                      % pEnd->wPacketSize)
 +                                                      == 0)
 +#ifdef CONFIG_USB_INVENTRA_DMA
 +                                      || (is_dma &&
 +                                              ((!dma->bDesiredMode) ||
 +                                                  (pRequest->actual &
 +                                                  (pEnd->wPacketSize - 1))))
 +#endif
 +                              ) {
 +                                      /* on dma completion, fifo may not
 +                                       * be available yet ...
 +                                       */
 +                                      if (wCsrVal & MGC_M_TXCSR_TXPKTRDY)
 +                                              break;
 +
 +                                      DBG(4, "sending zero pkt\n");
 +                                      musb_writew(epio, MGC_O_HDRC_TXCSR,
 +                                                      MGC_M_TXCSR_MODE
 +                                                      | MGC_M_TXCSR_TXPKTRDY);
 +                                      pRequest->zero = 0;
 +                              }
 +
 +                              /* ... or if not, then complete it */
 +                              musb_g_giveback(pEnd, pRequest, 0);
 +
 +                              /* kickstart next transfer if appropriate;
 +                               * the packet that just completed might not
 +                               * be transmitted for hours or days.
 +                               * REVISIT for double buffering...
 +                               * FIXME revisit for stalls too...
 +                               */
 +                              MGC_SelectEnd(pBase, bEnd);
 +                              wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +                              if (wCsrVal & MGC_M_TXCSR_FIFONOTEMPTY)
 +                                      break;
 +                              pRequest = pEnd->desc
 +                                              ? next_request(pEnd)
 +                                              : NULL;
 +                              if (!pRequest) {
 +                                      DBG(4, "%s idle now\n",
 +                                                      pEnd->end_point.name);
 +                                      break;
 +                              }
 +                      }
 +
 +                      txstate(musb, to_musb_request(pRequest));
 +              }
 +
 +      } while (0);
 +}
 +
 +/* ------------------------------------------------------------ */
 +
 +#ifdef CONFIG_USB_INVENTRA_DMA
 +
 +/* Peripheral rx (OUT) using Mentor DMA works as follows:
 +      - Only mode 0 is used.
 +
 +      - Request is queued by the gadget class driver.
 +              -> if queue was previously empty, rxstate()
 +
 +      - Host sends OUT token which causes an endpoint interrupt
 +        /\      -> RxReady
 +        |           -> if request queued, call rxstate
 +        |             /\      -> setup DMA
 +        |             |            -> DMA interrupt on completion
 +        |             |               -> RxReady
 +        |             |                     -> stop DMA
 +        |             |                     -> ack the read
 +        |             |                     -> if data recd = max expected
 +        |             |                               by the request, or host
 +        |             |                               sent a short packet,
 +        |             |                               complete the request,
 +        |             |                               and start the next one.
 +        |             |_____________________________________|
 +        |                                      else just wait for the host
 +        |                                         to send the next OUT token.
 +        |__________________________________________________|
 +
 + * Non-Mentor DMA engines can of course work differently.
 + */
 +
 +#endif
 +
 +/*
 + * Context: controller locked, IRQs blocked, endpoint selected
 + */
 +/* Drain an OUT/RX packet into the request: program DMA when possible,
 + * otherwise read the fifo by PIO; completes the request at the end of
 + * the transfer or on a short packet.
 + */
 +static void rxstate(struct musb *musb, struct musb_request *req)
 +{
 +      u16                     wCsrVal = 0;
 +      const u8                bEnd = req->bEnd;
 +      struct usb_request      *pRequest = &req->request;
 +      struct musb_ep          *pEnd = &musb->aLocalEnd[bEnd].ep_out;
 +      void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
 +      u16                     wFifoCount = 0;
 +      u16                     wCount = pEnd->wPacketSize;
 +
 +      wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
 +
 +      if (is_cppi_enabled() && pEnd->dma) {
 +              struct dma_controller   *c = musb->pDmaController;
 +              struct dma_channel      *channel = pEnd->dma;
 +
 +              /* NOTE:  CPPI won't actually stop advancing the DMA
 +               * queue after short packet transfers, so this is almost
 +               * always going to run as IRQ-per-packet DMA so that
 +               * faults will be handled correctly.
 +               */
 +              if (c->channel_program(channel,
 +                              pEnd->wPacketSize,
 +                              !pRequest->short_not_ok,
 +                              pRequest->dma + pRequest->actual,
 +                              pRequest->length - pRequest->actual)) {
 +
 +                      /* make sure that if an rxpkt arrived after the irq,
 +                       * the cppi engine will be ready to take it as soon
 +                       * as DMA is enabled
 +                       */
 +                      wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
 +                                      | MGC_M_RXCSR_DMAMODE);
 +                      wCsrVal |= MGC_M_RXCSR_DMAENAB | MGC_M_RXCSR_P_WZC_BITS;
 +                      musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
 +                      return;
 +              }
 +      }
 +
 +      if (wCsrVal & MGC_M_RXCSR_RXPKTRDY) {
 +              wCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
 +              if (pRequest->actual < pRequest->length) {
 +#ifdef CONFIG_USB_INVENTRA_DMA
 +                      if (is_dma_capable() && pEnd->dma) {
 +                              struct dma_controller   *c;
 +                              struct dma_channel      *channel;
 +                              int                     use_dma = 0;
 +
 +                              c = musb->pDmaController;
 +                              channel = pEnd->dma;
 +
 +      /* We use DMA Req mode 0 in RxCsr, and DMA controller operates in
 +       * mode 0 only. So we do not get endpoint interrupts due to DMA
 +       * completion. We only get interrupts from DMA controller.
 +       *
 +       * We could operate in DMA mode 1 if we knew the size of the transfer
 +       * in advance. For mass storage class, request->length = what the host
 +       * sends, so that'd work.  But for pretty much everything else,
 +       * request->length is routinely more than what the host sends. For
 +       * most of these gadgets, the end of the transfer is signified either
 +       * by a short packet, or filling the last byte of the buffer.  (Sending
 +       * extra data in that last packet should trigger an overflow fault.)
 +       * But in mode 1, we don't get a DMA completion interrupt for short
 +       * packets.
 +       *
 +       * Theoretically, we could enable DMAReq interrupt (RxCsr_DMAMODE = 1),
 +       * to get endpoint interrupt on every DMA req, but that didn't seem
 +       * to work reliably.
 +       *
 +       * REVISIT an updated g_file_storage can set req->short_not_ok, which
 +       * then becomes usable as a runtime "use mode 1" hint...
 +       */
 +
 +                              wCsrVal |= MGC_M_RXCSR_DMAENAB;
 +#ifdef USE_MODE1
 +                              wCsrVal |= MGC_M_RXCSR_AUTOCLEAR;
 +//                            wCsrVal |= MGC_M_RXCSR_DMAMODE;
 +
 +                              /* this special sequence (enabling and then
 +                                 disabling MGC_M_RXCSR_DMAMODE) is required
 +                                 to get DMAReq to activate
 +                               */
 +                              musb_writew(epio, MGC_O_HDRC_RXCSR,
 +                                      wCsrVal | MGC_M_RXCSR_DMAMODE);
 +#endif
 +                              musb_writew(epio, MGC_O_HDRC_RXCSR,
 +                                              wCsrVal);
 +
 +                              if (pRequest->actual < pRequest->length) {
 +                                      int transfer_size = 0;
 +#ifdef USE_MODE1
 +                                      transfer_size = min(pRequest->length,
 +                                                      channel->dwMaxLength);
 +#else
 +                                      transfer_size = wCount;
 +#endif
 +                                      /* mode 0 for single-packet reads,
 +                                       * mode 1 otherwise
 +                                       */
 +                                      if (transfer_size <= pEnd->wPacketSize)
 +                                              pEnd->dma->bDesiredMode = 0;
 +                                      else
 +                                              pEnd->dma->bDesiredMode = 1;
 +
 +                                      use_dma = c->channel_program(
 +                                                      channel,
 +                                                      pEnd->wPacketSize,
 +                                                      channel->bDesiredMode,
 +                                                      pRequest->dma
 +                                                      + pRequest->actual,
 +                                                      transfer_size);
 +                              }
 +
 +                              if (use_dma)
 +                                      return;
 +                      }
 +#endif        /* Mentor's USB */
 +
 +                      wFifoCount = pRequest->length - pRequest->actual;
 +                      DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
 +                                      pEnd->end_point.name,
 +                                      wCount, wFifoCount,
 +                                      pEnd->wPacketSize);
 +
 +                      wFifoCount = min(wCount, wFifoCount);
 +
 +#ifdef        CONFIG_USB_TUSB_OMAP_DMA
 +                      if (tusb_dma_omap() && pEnd->dma) {
 +                              struct dma_controller *c = musb->pDmaController;
 +                              struct dma_channel *channel = pEnd->dma;
 +                              u32 dma_addr = pRequest->dma + pRequest->actual;
 +                              int ret;
 +
 +                              ret = c->channel_program(channel,
 +                                              pEnd->wPacketSize,
 +                                              channel->bDesiredMode,
 +                                              dma_addr,
 +                                              wFifoCount);
 +                              if (ret == TRUE)
 +                                      return;
 +                      }
 +#endif
 +
 +                      /* PIO: copy the packet out of the fifo */
 +                      musb_read_fifo(pEnd->hw_ep, wFifoCount, (u8 *)
 +                                      (pRequest->buf + pRequest->actual));
 +                      pRequest->actual += wFifoCount;
 +
 +                      /* REVISIT if we left anything in the fifo, flush
 +                       * it and report -EOVERFLOW
 +                       */
 +
 +                      /* ack the read! */
 +                      wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
 +                      wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
 +                      musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
 +              }
 +      }
 +
 +      /* reach the end or short packet detected */
 +      if (pRequest->actual == pRequest->length || wCount < pEnd->wPacketSize)
 +              musb_g_giveback(pEnd, pRequest, 0);
 +}
 +
 +/*
 + * Data ready for a request; called from IRQ
 + */
 +void musb_g_rx(struct musb *musb, u8 bEnd)
 +{
 +      u16                     wCsrVal;
 +      struct usb_request      *pRequest;
 +      void __iomem            *pBase = musb->pRegs;
 +      struct musb_ep          *pEnd = &musb->aLocalEnd[bEnd].ep_out;
 +      void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
 +      struct dma_channel      *dma;
 +
 +      MGC_SelectEnd(pBase, bEnd);
 +
 +      pRequest = next_request(pEnd);
 +
 +      wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
 +      dma = is_dma_capable() ? pEnd->dma : NULL;
 +
 +      DBG(4, "<== %s, rxcsr %04x%s %p\n", pEnd->end_point.name,
 +                      wCsrVal, dma ? " (dma)" : "", pRequest);
 +
 +      if (wCsrVal & MGC_M_RXCSR_P_SENTSTALL) {
 +              if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
 +                      dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
 +                      (void) musb->pDmaController->channel_abort(dma);
 +                      pRequest->actual += pEnd->dma->dwActualLength;
 +              }
 +
 +              wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
 +              wCsrVal &= ~MGC_M_RXCSR_P_SENTSTALL;
 +              musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
 +
 +              if (pRequest)
 +                      musb_g_giveback(pEnd, pRequest, -EPIPE);
 +              goto done;
 +      }
 +
 +      if (wCsrVal & MGC_M_RXCSR_P_OVERRUN) {
 +              // wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
 +              wCsrVal &= ~MGC_M_RXCSR_P_OVERRUN;
 +              musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
 +
 +              DBG(3, "%s iso overrun on %p\n", pEnd->name, pRequest);
 +              if (pRequest && pRequest->status == -EINPROGRESS)
 +                      pRequest->status = -EOVERFLOW;
 +      }
 +      if (wCsrVal & MGC_M_RXCSR_INCOMPRX) {
 +              /* REVISIT not necessarily an error */
 +              DBG(4, "%s, incomprx\n", pEnd->end_point.name);
 +      }
 +
 +      if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
 +              /* "should not happen"; likely RXPKTRDY pending for DMA */
 +              DBG((wCsrVal & MGC_M_RXCSR_DMAENAB) ? 4 : 1,
 +                      "%s busy, csr %04x\n",
 +                      pEnd->end_point.name, wCsrVal);
 +              goto done;
 +      }
 +
 +      if (dma && (wCsrVal & MGC_M_RXCSR_DMAENAB)) {
 +              wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
 +                              | MGC_M_RXCSR_DMAENAB
 +                              | MGC_M_RXCSR_DMAMODE);
 +              musb_writew(epio, MGC_O_HDRC_RXCSR,
 +                      MGC_M_RXCSR_P_WZC_BITS | wCsrVal);
 +
 +              pRequest->actual += pEnd->dma->dwActualLength;
 +
 +              DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
 +                      bEnd, wCsrVal,
 +                      musb_readw(epio, MGC_O_HDRC_RXCSR),
 +                      pEnd->dma->dwActualLength, pRequest);
 +
 +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
 +              /* Autoclear doesn't clear RxPktRdy for short packets */
 +              if ((dma->bDesiredMode == 0)
 +                              || (dma->dwActualLength
 +                                      & (pEnd->wPacketSize - 1))) {
 +                      /* ack the read! */
 +                      wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
 +                      musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
 +              }
 +
 +              /* incomplete, and not short? wait for next IN packet */
 +                if ((pRequest->actual < pRequest->length)
 +                              && (pEnd->dma->dwActualLength
 +                                      == pEnd->wPacketSize))
 +                      goto done;
 +#endif
 +              musb_g_giveback(pEnd, pRequest, 0);
 +
 +              pRequest = next_request(pEnd);
 +              if (!pRequest)
 +                      goto done;
 +
 +              /* don't start more i/o till the stall clears */
 +              MGC_SelectEnd(pBase, bEnd);
 +              wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
 +              if (wCsrVal & MGC_M_RXCSR_P_SENDSTALL)
 +                      goto done;
 +      }
 +
 +
 +      /* analyze request if the ep is hot */
 +      if (pRequest)
 +              rxstate(musb, to_musb_request(pRequest));
 +      else
 +              DBG(3, "packet waiting for %s%s request\n",
 +                              pEnd->desc ? "" : "inactive ",
 +                              pEnd->end_point.name);
 +
 +done:
 +      return;
 +}
 +
 +/* ------------------------------------------------------------ */
 +
 +static int musb_gadget_enable(struct usb_ep *ep,
 +                      const struct usb_endpoint_descriptor *desc)
 +{
 +      unsigned long           flags;
 +      struct musb_ep          *pEnd;
 +      struct musb_hw_ep       *hw_ep;
 +      void __iomem            *regs;
 +      struct musb             *musb;
 +      void __iomem    *pBase;
 +      u8              bEnd;
 +      u16             csr;
 +      unsigned        tmp;
 +      int             status = -EINVAL;
 +
 +      if (!ep || !desc)
 +              return -EINVAL;
 +
 +      pEnd = to_musb_ep(ep);
 +      hw_ep = pEnd->hw_ep;
 +      regs = hw_ep->regs;
 +      musb = pEnd->pThis;
 +      pBase = musb->pRegs;
 +      bEnd = pEnd->bEndNumber;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +      if (pEnd->desc) {
 +              status = -EBUSY;
 +              goto fail;
 +      }
 +      pEnd->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
 +
 +      /* check direction and (later) maxpacket size against endpoint */
 +      if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != bEnd)
 +              goto fail;
 +
 +      /* REVISIT this rules out high bandwidth periodic transfers */
 +      tmp = le16_to_cpu(desc->wMaxPacketSize);
 +      if (tmp & ~0x07ff)
 +              goto fail;
 +      pEnd->wPacketSize = tmp;
 +
 +      /* enable the interrupts for the endpoint, set the endpoint
 +       * packet size (or fail), set the mode, clear the fifo
 +       */
 +      MGC_SelectEnd(pBase, bEnd);
 +      if (desc->bEndpointAddress & USB_DIR_IN) {
 +              u16 wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);
 +
 +              if (hw_ep->bIsSharedFifo)
 +                      pEnd->is_in = 1;
 +              if (!pEnd->is_in)
 +                      goto fail;
 +              if (tmp > hw_ep->wMaxPacketSizeTx)
 +                      goto fail;
 +
 +              wIntrTxE |= (1 << bEnd);
 +              musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);
 +
 +              /* REVISIT if can_bulk_split(), use by updating "tmp";
 +               * likewise high bandwidth periodic tx
 +               */
 +              musb_writew(regs, MGC_O_HDRC_TXMAXP, tmp);
 +
 +              csr = MGC_M_TXCSR_MODE | MGC_M_TXCSR_CLRDATATOG;
 +              if (musb_readw(regs, MGC_O_HDRC_TXCSR)
 +                              & MGC_M_TXCSR_FIFONOTEMPTY)
 +                      csr |= MGC_M_TXCSR_FLUSHFIFO;
 +              if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
 +                      csr |= MGC_M_TXCSR_P_ISO;
 +
 +              /* set twice in case of double buffering */
 +              musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
 +              /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
 +              musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
 +
 +      } else {
 +              u16 wIntrRxE = musb_readw(pBase, MGC_O_HDRC_INTRRXE);
 +
 +              if (hw_ep->bIsSharedFifo)
 +                      pEnd->is_in = 0;
 +              if (pEnd->is_in)
 +                      goto fail;
 +              if (tmp > hw_ep->wMaxPacketSizeRx)
 +                      goto fail;
 +
 +              wIntrRxE |= (1 << bEnd);
 +              musb_writew(pBase, MGC_O_HDRC_INTRRXE, wIntrRxE);
 +
 +              /* REVISIT if can_bulk_combine() use by updating "tmp"
 +               * likewise high bandwidth periodic rx
 +               */
 +              musb_writew(regs, MGC_O_HDRC_RXMAXP, tmp);
 +
 +              /* force shared fifo to OUT-only mode */
 +              if (hw_ep->bIsSharedFifo) {
 +                      csr = musb_readw(regs, MGC_O_HDRC_TXCSR);
 +                      csr &= ~(MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY);
 +                      musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
 +              }
 +
 +              csr = MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_CLRDATATOG;
 +              if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
 +                      csr |= MGC_M_RXCSR_P_ISO;
 +              else if (pEnd->type == USB_ENDPOINT_XFER_INT)
 +                      csr |= MGC_M_RXCSR_DISNYET;
 +
 +              /* set twice in case of double buffering */
 +              musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
 +              musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
 +      }
 +
 +      /* NOTE:  all the I/O code _should_ work fine without DMA, in case
 +       * for some reason you run out of channels here.
 +       */
 +      if (is_dma_capable() && musb->pDmaController) {
 +              struct dma_controller   *c = musb->pDmaController;
 +
 +              pEnd->dma = c->channel_alloc(c, hw_ep,
 +                              (desc->bEndpointAddress & USB_DIR_IN));
 +      } else
 +              pEnd->dma = NULL;
 +
 +      pEnd->desc = desc;
 +      pEnd->busy = 0;
 +      status = 0;
 +
 +      pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
 +                      musb_driver_name, pEnd->end_point.name,
 +                      ({ char *s; switch (pEnd->type) {
 +                      case USB_ENDPOINT_XFER_BULK:    s = "bulk"; break;
 +                      case USB_ENDPOINT_XFER_INT:     s = "int"; break;
 +                      default:                        s = "iso"; break;
 +                      }; s; }),
 +                      pEnd->is_in ? "IN" : "OUT",
 +                      pEnd->dma ? "dma, " : "",
 +                      pEnd->wPacketSize);
 +
 +      schedule_work(&musb->irq_work);
 +
 +fail:
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +      return status;
 +}
 +
 +/*
 + * Disable an endpoint flushing all requests queued.
 + */
 +static int musb_gadget_disable(struct usb_ep *ep)
 +{
 +      unsigned long   flags;
 +      struct musb     *musb;
 +      u8              bEnd;
 +      struct musb_ep  *pEnd;
 +      void __iomem    *epio;
 +      int             status = 0;
 +
 +      pEnd = to_musb_ep(ep);
 +      musb = pEnd->pThis;
 +      bEnd = pEnd->bEndNumber;
 +      epio = musb->aLocalEnd[bEnd].regs;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +      MGC_SelectEnd(musb->pRegs, bEnd);
 +
 +      /* zero the endpoint sizes */
 +      if (pEnd->is_in) {
 +              u16 wIntrTxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRTXE);
 +              wIntrTxE &= ~(1 << bEnd);
 +              musb_writew(musb->pRegs, MGC_O_HDRC_INTRTXE, wIntrTxE);
 +              musb_writew(epio, MGC_O_HDRC_TXMAXP, 0);
 +      } else {
 +              u16 wIntrRxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRRXE);
 +              wIntrRxE &= ~(1 << bEnd);
 +              musb_writew(musb->pRegs, MGC_O_HDRC_INTRRXE, wIntrRxE);
 +              musb_writew(epio, MGC_O_HDRC_RXMAXP, 0);
 +      }
 +
 +      pEnd->desc = NULL;
 +
 +      /* abort all pending DMA and requests */
 +      nuke(pEnd, -ESHUTDOWN);
 +
 +      schedule_work(&musb->irq_work);
 +
 +      spin_unlock_irqrestore(&(musb->Lock), flags);
 +
 +      DBG(2, "%s\n", pEnd->end_point.name);
 +
 +      return status;
 +}
 +
 +/*
 + * Allocate a request for an endpoint.
 + * Reused by ep0 code.
 + */
 +struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 +{
 +      struct musb_ep          *musb_ep = to_musb_ep(ep);
 +      struct musb_request     *pRequest = NULL;
 +
 +      pRequest = kzalloc(sizeof *pRequest, gfp_flags);
 +      if (pRequest) {
 +              INIT_LIST_HEAD(&pRequest->request.list);
 +              pRequest->request.dma = DMA_ADDR_INVALID;
 +              pRequest->bEnd = musb_ep->bEndNumber;
 +              pRequest->ep = musb_ep;
 +      }
 +
 +      return &pRequest->request;
 +}
 +
 +/*
 + * Free a request
 + * Reused by ep0 code.
 + */
 +void musb_free_request(struct usb_ep *ep, struct usb_request *req)
 +{
 +      kfree(to_musb_request(req));
 +}
 +
- static void do_free(unsigned long ignored)
- {
-       spin_lock_irq(&buflock);
-       while (!list_empty(&buffers)) {
-               struct free_record      *buf;
-               buf = list_entry(buffers.next, struct free_record, list);
-               list_del(&buf->list);
-               spin_unlock_irq(&buflock);
-               dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);
-               spin_lock_irq(&buflock);
-       }
-       spin_unlock_irq(&buflock);
- }
- static DECLARE_TASKLET(deferred_free, do_free, 0);
- static void musb_gadget_free_buffer(struct usb_ep *ep,
-               void *address, dma_addr_t dma, unsigned bytes)
- {
-       struct musb_ep          *musb_ep = to_musb_ep(ep);
-       struct free_record      *buf = address;
-       unsigned long           flags;
-       buf->dev = musb_ep->pThis->controller;
-       buf->bytes = bytes;
-       buf->dma = dma;
-       spin_lock_irqsave(&buflock, flags);
-       list_add_tail(&buf->list, &buffers);
-       tasklet_schedule(&deferred_free);
-       spin_unlock_irqrestore(&buflock, flags);
- }
 +static LIST_HEAD(buffers);
 +
 +struct free_record {
 +      struct list_head        list;
 +      struct device           *dev;
 +      unsigned                bytes;
 +      dma_addr_t              dma;
 +};
 +
-       .alloc_buffer   = musb_gadget_alloc_buffer,
-       .free_buffer    = musb_gadget_free_buffer,
 +/*
 + * Context: controller locked, IRQs blocked.
 + */
 +static void musb_ep_restart(struct musb *musb, struct musb_request *req)
 +{
 +      DBG(3, "<== %s request %p len %u on hw_ep%d\n",
 +              req->bTx ? "TX/IN" : "RX/OUT",
 +              &req->request, req->request.length, req->bEnd);
 +
 +      MGC_SelectEnd(musb->pRegs, req->bEnd);
 +      if (req->bTx)
 +              txstate(musb, req);
 +      else
 +              rxstate(musb, req);
 +}
 +
 +static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 +                      gfp_t gfp_flags)
 +{
 +      struct musb_ep          *pEnd;
 +      struct musb_request     *pRequest;
 +      struct musb             *musb;
 +      int                     status = 0;
 +      unsigned long           lockflags;
 +
 +      if (!ep || !req)
 +              return -EINVAL;
 +      if (!req->buf)
 +              return -ENODATA;
 +
 +      pEnd = to_musb_ep(ep);
 +      musb = pEnd->pThis;
 +
 +      pRequest = to_musb_request(req);
 +      pRequest->musb = musb;
 +
 +      if (pRequest->ep != pEnd)
 +              return -EINVAL;
 +
 +      DBG(4, "<== to %s request=%p\n", ep->name, req);
 +
 +      /* request is mine now... */
 +      pRequest->request.actual = 0;
 +      pRequest->request.status = -EINPROGRESS;
 +      pRequest->bEnd = pEnd->bEndNumber;
 +      pRequest->bTx = pEnd->is_in;
 +
 +      if (is_dma_capable() && pEnd->dma) {
 +              if (pRequest->request.dma == DMA_ADDR_INVALID) {
 +                      pRequest->request.dma = dma_map_single(
 +                                      musb->controller,
 +                                      pRequest->request.buf,
 +                                      pRequest->request.length,
 +                                      pRequest->bTx
 +                                              ? DMA_TO_DEVICE
 +                                              : DMA_FROM_DEVICE);
 +                      pRequest->mapped = 1;
 +              } else {
 +                      dma_sync_single_for_device(musb->controller,
 +                                      pRequest->request.dma,
 +                                      pRequest->request.length,
 +                                      pRequest->bTx
 +                                              ? DMA_TO_DEVICE
 +                                              : DMA_FROM_DEVICE);
 +                      pRequest->mapped = 0;
 +              }
 +      } else if (!req->buf) {
 +              return -ENODATA;
 +      } else
 +              pRequest->mapped = 0;
 +
 +      spin_lock_irqsave(&musb->Lock, lockflags);
 +
 +      /* don't queue if the ep is down */
 +      if (!pEnd->desc) {
 +              DBG(4, "req %p queued to %s while ep %s\n",
 +                              req, ep->name, "disabled");
 +              status = -ESHUTDOWN;
 +              goto cleanup;
 +      }
 +
 +      /* add pRequest to the list */
 +      list_add_tail(&(pRequest->request.list), &(pEnd->req_list));
 +
 +      /* it this is the head of the queue, start i/o ... */
 +      if (!pEnd->busy && &pRequest->request.list == pEnd->req_list.next)
 +              musb_ep_restart(musb, pRequest);
 +
 +cleanup:
 +      spin_unlock_irqrestore(&musb->Lock, lockflags);
 +      return status;
 +}
 +
 +static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *pRequest)
 +{
 +      struct musb_ep          *pEnd = to_musb_ep(ep);
 +      struct usb_request      *r;
 +      unsigned long           flags;
 +      int                     status = 0;
 +      struct musb             *musb = pEnd->pThis;
 +
 +      if (!ep || !pRequest || to_musb_request(pRequest)->ep != pEnd)
 +              return -EINVAL;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +      list_for_each_entry(r, &pEnd->req_list, list) {
 +              if (r == pRequest)
 +                      break;
 +      }
 +      if (r != pRequest) {
 +              DBG(3, "request %p not queued to %s\n", pRequest, ep->name);
 +              status = -EINVAL;
 +              goto done;
 +      }
 +
 +      /* if the hardware doesn't have the request, easy ... */
 +      if (pEnd->req_list.next != &pRequest->list || pEnd->busy)
 +              musb_g_giveback(pEnd, pRequest, -ECONNRESET);
 +
 +      /* ... else abort the dma transfer ... */
 +      else if (is_dma_capable() && pEnd->dma) {
 +              struct dma_controller   *c = musb->pDmaController;
 +
 +              MGC_SelectEnd(musb->pRegs, pEnd->bEndNumber);
 +              if (c->channel_abort)
 +                      status = c->channel_abort(pEnd->dma);
 +              else
 +                      status = -EBUSY;
 +              if (status == 0)
 +                      musb_g_giveback(pEnd, pRequest, -ECONNRESET);
 +      } else {
 +              /* NOTE: by sticking to easily tested hardware/driver states,
 +               * we leave counting of in-flight packets imprecise.
 +               */
 +              musb_g_giveback(pEnd, pRequest, -ECONNRESET);
 +      }
 +
 +done:
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +      return status;
 +}
 +
 +/*
 + * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
 + * data but will queue requests.
 + *
 + * exported to ep0 code
 + */
 +int musb_gadget_set_halt(struct usb_ep *ep, int value)
 +{
 +      struct musb_ep          *pEnd = to_musb_ep(ep);
 +      u8                      bEnd = pEnd->bEndNumber;
 +      struct musb             *musb = pEnd->pThis;
 +      void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
 +      void __iomem            *pBase;
 +      unsigned long           flags;
 +      u16                     wCsr;
 +      struct musb_request     *pRequest = NULL;
 +      int                     status = 0;
 +
 +      if (!ep)
 +              return -EINVAL;
 +      pBase = musb->pRegs;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +      if ((USB_ENDPOINT_XFER_ISOC == pEnd->type)) {
 +              status = -EINVAL;
 +              goto done;
 +      }
 +
 +      MGC_SelectEnd(pBase, bEnd);
 +
 +      /* cannot portably stall with non-empty FIFO */
 +      pRequest = to_musb_request(next_request(pEnd));
 +      if (value && pEnd->is_in) {
 +              wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +              if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
 +                      DBG(3, "%s fifo busy, cannot halt\n", ep->name);
 +                      spin_unlock_irqrestore(&musb->Lock, flags);
 +                      return -EAGAIN;
 +              }
 +
 +      }
 +
 +      /* set/clear the stall and toggle bits */
 +      DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
 +      if (pEnd->is_in) {
 +              wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +              if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY)
 +                      wCsr |= MGC_M_TXCSR_FLUSHFIFO;
 +              wCsr |= MGC_M_TXCSR_P_WZC_BITS
 +                      | MGC_M_TXCSR_CLRDATATOG;
 +              if (value)
 +                      wCsr |= MGC_M_TXCSR_P_SENDSTALL;
 +              else
 +                      wCsr &= ~(MGC_M_TXCSR_P_SENDSTALL
 +                              | MGC_M_TXCSR_P_SENTSTALL);
 +              wCsr &= ~MGC_M_TXCSR_TXPKTRDY;
 +              musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
 +      } else {
 +              wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
 +              wCsr |= MGC_M_RXCSR_P_WZC_BITS
 +                      | MGC_M_RXCSR_FLUSHFIFO
 +                      | MGC_M_RXCSR_CLRDATATOG;
 +              if (value)
 +                      wCsr |= MGC_M_RXCSR_P_SENDSTALL;
 +              else
 +                      wCsr &= ~(MGC_M_RXCSR_P_SENDSTALL
 +                              | MGC_M_RXCSR_P_SENTSTALL);
 +              musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
 +      }
 +
 +done:
 +
 +      /* maybe start the first request in the queue */
 +      if (!pEnd->busy && !value && pRequest) {
 +              DBG(3, "restarting the request\n");
 +              musb_ep_restart(musb, pRequest);
 +      }
 +
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +      return status;
 +}
 +
 +static int musb_gadget_fifo_status(struct usb_ep *ep)
 +{
 +      struct musb_ep          *musb_ep = to_musb_ep(ep);
 +      void __iomem            *epio = musb_ep->hw_ep->regs;
 +      int                     retval = -EINVAL;
 +
 +      if (musb_ep->desc && !musb_ep->is_in) {
 +              struct musb             *musb = musb_ep->pThis;
 +              int                     bEnd = musb_ep->bEndNumber;
 +              void __iomem            *mbase = musb->pRegs;
 +              unsigned long           flags;
 +
 +              spin_lock_irqsave(&musb->Lock, flags);
 +
 +              MGC_SelectEnd(mbase, bEnd);
 +              /* FIXME return zero unless RXPKTRDY is set */
 +              retval = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
 +
 +              spin_unlock_irqrestore(&musb->Lock, flags);
 +      }
 +      return retval;
 +}
 +
 +static void musb_gadget_fifo_flush(struct usb_ep *ep)
 +{
 +      struct musb_ep  *musb_ep = to_musb_ep(ep);
 +      struct musb     *musb = musb_ep->pThis;
 +      u8              nEnd = musb_ep->bEndNumber;
 +      void __iomem    *epio = musb->aLocalEnd[nEnd].regs;
 +      void __iomem    *mbase;
 +      unsigned long   flags;
 +      u16             wCsr, wIntrTxE;
 +
 +      mbase = musb->pRegs;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +      MGC_SelectEnd(mbase, (u8) nEnd);
 +
 +      /* disable interrupts */
 +      wIntrTxE = musb_readw(mbase, MGC_O_HDRC_INTRTXE);
 +      musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << nEnd));
 +
 +      if (musb_ep->is_in) {
 +              wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
 +              if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
 +                      wCsr |= MGC_M_TXCSR_FLUSHFIFO | MGC_M_TXCSR_P_WZC_BITS;
 +                      musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
 +                      /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
 +                      musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
 +              }
 +      } else {
 +              wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
 +              wCsr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_P_WZC_BITS;
 +              musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
 +              musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
 +      }
 +
 +      /* re-enable interrupt */
 +      musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE);
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +}
 +
 +static const struct usb_ep_ops musb_ep_ops = {
 +      .enable         = musb_gadget_enable,
 +      .disable        = musb_gadget_disable,
 +      .alloc_request  = musb_alloc_request,
 +      .free_request   = musb_free_request,
 +      .queue          = musb_gadget_queue,
 +      .dequeue        = musb_gadget_dequeue,
 +      .set_halt       = musb_gadget_set_halt,
 +      .fifo_status    = musb_gadget_fifo_status,
 +      .fifo_flush     = musb_gadget_fifo_flush
 +};
 +
 +/***********************************************************************/
 +
 +static int musb_gadget_get_frame(struct usb_gadget *gadget)
 +{
 +      struct musb     *musb = gadget_to_musb(gadget);
 +
 +      return (int)musb_readw(musb->pRegs, MGC_O_HDRC_FRAME);
 +}
 +
 +static int musb_gadget_wakeup(struct usb_gadget *gadget)
 +{
 +      struct musb     *musb = gadget_to_musb(gadget);
 +      void __iomem    *mregs = musb->pRegs;
 +      unsigned long   flags;
 +      int             status = -EINVAL;
 +      u8              power, devctl;
 +      int             retries;
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +      switch (musb->xceiv.state) {
 +      case OTG_STATE_B_PERIPHERAL:
 +              /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
 +               * that's part of the standard usb 1.1 state machine, and
 +               * doesn't affect OTG transitions.
 +               */
 +              if (musb->may_wakeup && musb->is_suspended)
 +                      break;
 +              goto done;
 +      case OTG_STATE_B_IDLE:
 +              /* Start SRP ... OTG not required. */
 +              devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
 +              DBG(2, "Sending SRP: devctl: %02x\n", devctl);
 +              devctl |= MGC_M_DEVCTL_SESSION;
 +              musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl);
 +              devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
 +              retries = 100;
 +              while (!(devctl & MGC_M_DEVCTL_SESSION)) {
 +                      devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
 +                      if (retries-- < 1)
 +                              break;
 +              }
 +              retries = 10000;
 +              while (devctl & MGC_M_DEVCTL_SESSION) {
 +                      devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
 +                      if (retries-- < 1)
 +                              break;
 +              }
 +
 +              /* Block idling for at least 1s */
 +              musb_platform_try_idle(musb,
 +                      jiffies + msecs_to_jiffies(1 * HZ));
 +
 +              status = 0;
 +              goto done;
 +      default:
 +              goto done;
 +      }
 +
 +      status = 0;
 +
 +      power = musb_readb(mregs, MGC_O_HDRC_POWER);
 +      power |= MGC_M_POWER_RESUME;
 +      musb_writeb(mregs, MGC_O_HDRC_POWER, power);
 +      DBG(2, "issue wakeup\n");
 +
 +      /* FIXME do this next chunk in a timer callback, no udelay */
 +      mdelay(2);
 +
 +      power = musb_readb(mregs, MGC_O_HDRC_POWER);
 +      power &= ~MGC_M_POWER_RESUME;
 +      musb_writeb(mregs, MGC_O_HDRC_POWER, power);
 +done:
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +      return status;
 +}
 +
 +static int
 +musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
 +{
 +      struct musb     *musb = gadget_to_musb(gadget);
 +
 +      musb->is_self_powered = !!is_selfpowered;
 +      return 0;
 +}
 +
 +static void musb_pullup(struct musb *musb, int is_on)
 +{
 +      u8 power;
 +
 +      power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
 +      if (is_on)
 +              power |= MGC_M_POWER_SOFTCONN;
 +      else
 +              power &= ~MGC_M_POWER_SOFTCONN;
 +
 +      /* FIXME if on, HdrcStart; if off, HdrcStop */
 +
 +      DBG(3, "gadget %s D+ pullup %s\n",
 +              musb->pGadgetDriver->function, is_on ? "on" : "off");
 +      musb_writeb(musb->pRegs, MGC_O_HDRC_POWER, power);
 +}
 +
 +#if 0
 +static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
 +{
 +      DBG(2, "<= %s =>\n", __FUNCTION__);
 +
 +      // FIXME iff driver's softconnect flag is set (as it is during probe,
 +      // though that can clear it), just musb_pullup().
 +
 +      return -EINVAL;
 +}
 +
 +static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
 +{
 +      /* FIXME -- delegate to otg_transceiver logic */
 +
 +      DBG(2, "<= vbus_draw %u =>\n", mA);
 +      return 0;
 +}
 +#endif
 +
 +static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
 +{
 +      struct musb     *musb = gadget_to_musb(gadget);
 +
 +      if (!musb->xceiv.set_power)
 +              return -EOPNOTSUPP;
 +      return otg_set_power(&musb->xceiv, mA);
 +}
 +
 +static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 +{
 +      struct musb     *musb = gadget_to_musb(gadget);
 +      unsigned long   flags;
 +
 +      is_on = !!is_on;
 +
 +      /* NOTE: this assumes we are sensing vbus; we'd rather
 +       * not pullup unless the B-session is active.
 +       */
 +      spin_lock_irqsave(&musb->Lock, flags);
 +      if (is_on != musb->softconnect) {
 +              musb->softconnect = is_on;
 +              musb_pullup(musb, is_on);
 +      }
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +      return 0;
 +}
 +
 +static const struct usb_gadget_ops musb_gadget_operations = {
 +      .get_frame              = musb_gadget_get_frame,
 +      .wakeup                 = musb_gadget_wakeup,
 +      .set_selfpowered        = musb_gadget_set_self_powered,
 +      //.vbus_session         = musb_gadget_vbus_session,
 +      .vbus_draw              = musb_gadget_vbus_draw,
 +      .pullup                 = musb_gadget_pullup,
 +};
 +
 +/****************************************************************
 + * Registration operations
 + ****************************************************************/
 +
 +/* Only this registration code "knows" the rule (from USB standards)
 + * about there being only one external upstream port.  It assumes
 + * all peripheral ports are external...
 + */
 +static struct musb *the_gadget;
 +
 +static void musb_gadget_release(struct device *dev)
 +{
 +      // kref_put(WHAT)
 +      dev_dbg(dev, "%s\n", __FUNCTION__);
 +}
 +
 +
 +static void __init
 +init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 bEnd, int is_in)
 +{
 +      struct musb_hw_ep       *hw_ep = musb->aLocalEnd + bEnd;
 +
 +      memset(ep, 0, sizeof *ep);
 +
 +      ep->bEndNumber = bEnd;
 +      ep->pThis = musb;
 +      ep->hw_ep = hw_ep;
 +      ep->is_in = is_in;
 +
 +      INIT_LIST_HEAD(&ep->req_list);
 +
 +      sprintf(ep->name, "ep%d%s", bEnd,
 +                      (!bEnd || hw_ep->bIsSharedFifo) ? "" : (
 +                              is_in ? "in" : "out"));
 +      ep->end_point.name = ep->name;
 +      INIT_LIST_HEAD(&ep->end_point.ep_list);
 +      if (!bEnd) {
 +              ep->end_point.maxpacket = 64;
 +              ep->end_point.ops = &musb_g_ep0_ops;
 +              musb->g.ep0 = &ep->end_point;
 +      } else {
 +              if (is_in)
 +                      ep->end_point.maxpacket = hw_ep->wMaxPacketSizeTx;
 +              else
 +                      ep->end_point.maxpacket = hw_ep->wMaxPacketSizeRx;
 +              ep->end_point.ops = &musb_ep_ops;
 +              list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
 +      }
 +}
 +
 +/*
 + * Initialize the endpoints exposed to peripheral drivers, with backlinks
 + * to the rest of the driver state.
 + */
 +static inline void __init musb_g_init_endpoints(struct musb *musb)
 +{
 +      u8                      bEnd;
 +      struct musb_hw_ep       *hw_ep;
 +      unsigned                count = 0;
 +
 +      /* initialize endpoint list just once */
 +      INIT_LIST_HEAD(&(musb->g.ep_list));
 +
 +      for (bEnd = 0, hw_ep = musb->aLocalEnd;
 +                      bEnd < musb->bEndCount;
 +                      bEnd++, hw_ep++) {
 +              if (hw_ep->bIsSharedFifo /* || !bEnd */) {
 +                      init_peripheral_ep(musb, &hw_ep->ep_in, bEnd, 0);
 +                      count++;
 +              } else {
 +                      if (hw_ep->wMaxPacketSizeTx) {
 +                              init_peripheral_ep(musb, &hw_ep->ep_in,
 +                                                      bEnd, 1);
 +                              count++;
 +                      }
 +                      if (hw_ep->wMaxPacketSizeRx) {
 +                              init_peripheral_ep(musb, &hw_ep->ep_out,
 +                                                      bEnd, 0);
 +                              count++;
 +                      }
 +              }
 +      }
 +}
 +
 +/* called once during driver setup to initialize and link into
 + * the driver model; memory is zeroed.
 + */
 +int __init musb_gadget_setup(struct musb *musb)
 +{
 +      int status;
 +
 +      /* REVISIT minor race:  if (erroneously) setting up two
 +       * musb peripherals at the same time, only the bus lock
 +       * is probably held.
 +       */
 +      if (the_gadget)
 +              return -EBUSY;
 +      the_gadget = musb;
 +
 +      musb->g.ops = &musb_gadget_operations;
 +      musb->g.is_dualspeed = 1;
 +      musb->g.speed = USB_SPEED_UNKNOWN;
 +
 +      /* this "gadget" abstracts/virtualizes the controller */
 +      strcpy(musb->g.dev.bus_id, "gadget");
 +      musb->g.dev.parent = musb->controller;
 +      musb->g.dev.dma_mask = musb->controller->dma_mask;
 +      musb->g.dev.release = musb_gadget_release;
 +      musb->g.name = musb_driver_name;
 +
 +      if (is_otg_enabled(musb))
 +              musb->g.is_otg = 1;
 +
 +      musb_g_init_endpoints(musb);
 +
 +      musb->is_active = 0;
 +      musb_platform_try_idle(musb, 0);
 +
 +      status = device_register(&musb->g.dev);
 +      if (status != 0)
 +              the_gadget = NULL;
 +      return status;
 +}
 +
 +void musb_gadget_cleanup(struct musb *musb)
 +{
 +      if (musb != the_gadget)
 +              return;
 +
 +      device_unregister(&musb->g.dev);
 +      the_gadget = NULL;
 +}
 +
 +/*
 + * Register the gadget driver. Used by gadget drivers when
 + * registering themselves with the controller.
 + *
 + * -EINVAL something went wrong (not driver)
 + * -EBUSY another gadget is already using the controller
 + * -ENOMEM no memory to perform the operation
 + *
 + * @param driver the gadget driver
 + * @return <0 if error, 0 if everything is fine
 + */
 +int usb_gadget_register_driver(struct usb_gadget_driver *driver)
 +{
 +      int retval;
 +      unsigned long flags;
 +      struct musb *musb = the_gadget;
 +
 +      if (!driver
 +                      || driver->speed != USB_SPEED_HIGH
 +                      || !driver->bind
 +                      || !driver->setup)
 +              return -EINVAL;
 +
 +      /* driver must be initialized to support peripheral mode */
 +      if (!musb || !(musb->board_mode == MUSB_OTG
 +                              || musb->board_mode != MUSB_OTG)) {
 +              DBG(1,"%s, no dev??\n", __FUNCTION__);
 +              return -ENODEV;
 +      }
 +
 +      DBG(3, "registering driver %s\n", driver->function);
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +      if (musb->pGadgetDriver) {
 +              DBG(1, "%s is already bound to %s\n",
 +                              musb_driver_name,
 +                              musb->pGadgetDriver->driver.name);
 +              retval = -EBUSY;
 +      } else {
 +              musb->pGadgetDriver = driver;
 +              musb->g.dev.driver = &driver->driver;
 +              driver->driver.bus = NULL;
 +              musb->softconnect = 1;
 +              retval = 0;
 +      }
 +
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +
 +      if (retval == 0)
 +              retval = driver->bind(&musb->g);
 +      if (retval != 0) {
 +              DBG(3, "bind to driver %s failed --> %d\n",
 +                      driver->driver.name, retval);
 +              musb->pGadgetDriver = NULL;
 +              musb->g.dev.driver = NULL;
 +      }
 +
 +      /* start peripheral and/or OTG engines */
 +      if (retval == 0) {
 +              spin_lock_irqsave(&musb->Lock, flags);
 +
 +              /* REVISIT always use otg_set_peripheral(), handling
 +               * issues including the root hub one below ...
 +               */
 +              musb->xceiv.gadget = &musb->g;
 +              musb->xceiv.state = OTG_STATE_B_IDLE;
 +              musb->is_active = 1;
 +
 +              /* FIXME this ignores the softconnect flag.  Drivers are
 +               * allowed hold the peripheral inactive until for example
 +               * userspace hooks up printer hardware or DSP codecs, so
 +               * hosts only see fully functional devices.
 +               */
 +
 +              if (!is_otg_enabled(musb))
 +                      musb_start(musb);
 +
 +              spin_unlock_irqrestore(&musb->Lock, flags);
 +
 +              if (is_otg_enabled(musb)) {
 +                      DBG(3, "OTG startup...\n");
 +
 +                      /* REVISIT:  funcall to other code, which also
 +                       * handles power budgeting ... this way also
 +                       * ensures HdrcStart is indirectly called.
 +                       */
 +                      retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
 +                      if (retval < 0) {
 +                              DBG(1, "add_hcd failed, %d\n", retval);
 +                              spin_lock_irqsave(&musb->Lock, flags);
 +                              musb->xceiv.gadget = NULL;
 +                              musb->xceiv.state = OTG_STATE_UNDEFINED;
 +                              musb->pGadgetDriver = NULL;
 +                              musb->g.dev.driver = NULL;
 +                              spin_unlock_irqrestore(&musb->Lock, flags);
 +                      }
 +              }
 +      }
 +
 +      return retval;
 +}
 +EXPORT_SYMBOL(usb_gadget_register_driver);
 +
 +static void
 +stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
 +{
 +      int                     i;
 +      struct musb_hw_ep       *hw_ep;
 +
 +      /* don't disconnect if it's not connected */
 +      if (musb->g.speed == USB_SPEED_UNKNOWN)
 +              driver = NULL;
 +      else
 +              musb->g.speed = USB_SPEED_UNKNOWN;
 +
 +      /* deactivate the hardware */
 +      if (musb->softconnect) {
 +              musb->softconnect = 0;
 +              musb_pullup(musb, 0);
 +      }
 +      musb_stop(musb);
 +
 +      /* killing any outstanding requests will quiesce the driver;
 +       * then report disconnect
 +       */
 +      if (driver) {
 +              for (i = 0, hw_ep = musb->aLocalEnd;
 +                              i < musb->bEndCount;
 +                              i++, hw_ep++) {
 +                      MGC_SelectEnd(musb->pRegs, i);
 +                      if (hw_ep->bIsSharedFifo /* || !bEnd */) {
 +                              nuke(&hw_ep->ep_in, -ESHUTDOWN);
 +                      } else {
 +                              if (hw_ep->wMaxPacketSizeTx)
 +                                      nuke(&hw_ep->ep_in, -ESHUTDOWN);
 +                              if (hw_ep->wMaxPacketSizeRx)
 +                                      nuke(&hw_ep->ep_out, -ESHUTDOWN);
 +                      }
 +              }
 +
 +              spin_unlock(&musb->Lock);
 +              driver->disconnect (&musb->g);
 +              spin_lock(&musb->Lock);
 +      }
 +}
 +
 +/*
 + * Unregister the gadget driver. Used by gadget drivers when
 + * unregistering themselves from the controller.
 + *
 + * @param driver the gadget driver to unregister
 + */
 +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 +{
 +      unsigned long   flags;
 +      int             retval = 0;
 +      struct musb     *musb = the_gadget;
 +
 +      if (!driver || !driver->unbind || !musb)
 +              return -EINVAL;
 +
 +      /* REVISIT always use otg_set_peripheral() here too;
 +       * this needs to shut down the OTG engine.
 +       */
 +
 +      spin_lock_irqsave(&musb->Lock, flags);
 +
 +#ifdef        CONFIG_USB_MUSB_OTG
 +      musb_hnp_stop(musb);
 +#endif
 +
 +      if (musb->pGadgetDriver == driver) {
 +              musb->xceiv.state = OTG_STATE_UNDEFINED;
 +              stop_activity(musb, driver);
 +
 +              DBG(3, "unregistering driver %s\n", driver->function);
 +              spin_unlock_irqrestore(&musb->Lock, flags);
 +              driver->unbind(&musb->g);
 +              spin_lock_irqsave(&musb->Lock, flags);
 +
 +              musb->pGadgetDriver = NULL;
 +              musb->g.dev.driver = NULL;
 +
 +              musb->is_active = 0;
 +              musb_platform_try_idle(musb, 0);
 +      } else
 +              retval = -EINVAL;
 +      spin_unlock_irqrestore(&musb->Lock, flags);
 +
 +      if (is_otg_enabled(musb) && retval == 0) {
 +              usb_remove_hcd(musb_to_hcd(musb));
 +              /* FIXME we need to be able to register another
 +               * gadget driver here and have everything work;
 +               * that currently misbehaves.
 +               */
 +      }
 +
 +      return retval;
 +}
 +EXPORT_SYMBOL(usb_gadget_unregister_driver);
 +
 +
 +/***********************************************************************/
 +
 +/* lifecycle operations called through plat_uds.c */
 +
 +void musb_g_resume(struct musb *musb)
 +{
 +      musb->is_suspended = 0;
 +      switch (musb->xceiv.state) {
 +      case OTG_STATE_B_IDLE:
 +              break;
 +      case OTG_STATE_B_WAIT_ACON:
 +      case OTG_STATE_B_PERIPHERAL:
 +              musb->is_active = 1;
 +              if (musb->pGadgetDriver && musb->pGadgetDriver->resume) {
 +                      spin_unlock(&musb->Lock);
 +                      musb->pGadgetDriver->resume(&musb->g);
 +                      spin_lock(&musb->Lock);
 +              }
 +              break;
 +      default:
 +              WARN("unhandled RESUME transition (%s)\n",
 +                              otg_state_string(musb));
 +      }
 +}
 +
 +/* called when SOF packets stop for 3+ msec */
 +void musb_g_suspend(struct musb *musb)
 +{
 +      u8      devctl;
 +
 +      devctl = musb_readb(musb->pRegs, MGC_O_HDRC_DEVCTL);
 +      DBG(3, "devctl %02x\n", devctl);
 +
 +      switch (musb->xceiv.state) {
 +      case OTG_STATE_B_IDLE:
 +              if ((devctl & MGC_M_DEVCTL_VBUS) == MGC_M_DEVCTL_VBUS)
 +                      musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
 +              break;
 +      case OTG_STATE_B_PERIPHERAL:
 +              musb->is_suspended = 1;
 +              if (musb->pGadgetDriver && musb->pGadgetDriver->suspend) {
 +                      spin_unlock(&musb->Lock);
 +                      musb->pGadgetDriver->suspend(&musb->g);
 +                      spin_lock(&musb->Lock);
 +              }
 +              break;
 +      default:
 +              /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
 +               * A_PERIPHERAL may need care too
 +               */
 +              WARN("unhandled SUSPEND transition (%s)\n",
 +                              otg_state_string(musb));
 +      }
 +}
 +
 +/* Called during SRP. Caller must hold lock */
 +void musb_g_wakeup(struct musb *musb)
 +{
 +      musb_gadget_wakeup(&musb->g);
 +}
 +
 +/* called when VBUS drops below session threshold, and in other cases */
 +void musb_g_disconnect(struct musb *musb)
 +{
 +      void __iomem    *mregs = musb->pRegs;
 +      u8      devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
 +
 +      DBG(3, "devctl %02x\n", devctl);
 +
 +      /* clear HR */
 +      musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl & MGC_M_DEVCTL_SESSION);
 +
 +      /* don't draw vbus until new b-default session */
 +      (void) musb_gadget_vbus_draw(&musb->g, 0);
 +
 +      musb->g.speed = USB_SPEED_UNKNOWN;
 +      if (musb->pGadgetDriver && musb->pGadgetDriver->disconnect) {
 +              spin_unlock(&musb->Lock);
 +              musb->pGadgetDriver->disconnect(&musb->g);
 +              spin_lock(&musb->Lock);
 +      }
 +
 +      switch (musb->xceiv.state) {
 +      default:
 +#ifdef        CONFIG_USB_MUSB_OTG
 +              musb->xceiv.state = OTG_STATE_A_IDLE;
 +              break;
 +      case OTG_STATE_A_PERIPHERAL:
 +              musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
 +              break;
 +      case OTG_STATE_B_WAIT_ACON:
 +      case OTG_STATE_B_HOST:
 +#endif
 +      case OTG_STATE_B_PERIPHERAL:
 +              musb->xceiv.state = OTG_STATE_B_IDLE;
 +              break;
 +      case OTG_STATE_B_SRP_INIT:
 +              break;
 +      }
 +
 +      musb->is_active = 0;
 +}
 +
 +void musb_g_reset(struct musb *musb)
 +__releases(musb->Lock)
 +__acquires(musb->Lock)
 +{
 +      void __iomem    *pBase = musb->pRegs;
 +      u8              devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
 +      u8              power;
 +
 +      DBG(3, "<== %s addr=%x driver '%s'\n",
 +                      (devctl & MGC_M_DEVCTL_BDEVICE)
 +                              ? "B-Device" : "A-Device",
 +                      musb_readb(pBase, MGC_O_HDRC_FADDR),
 +                      musb->pGadgetDriver
 +                              ? musb->pGadgetDriver->driver.name
 +                              : NULL
 +                      );
 +
 +      /* report disconnect, if we didn't already (flushing EP state) */
 +      if (musb->g.speed != USB_SPEED_UNKNOWN)
 +              musb_g_disconnect(musb);
 +
 +      /* clear HR */
 +      else if (devctl & MGC_M_DEVCTL_HR)
 +              musb_writeb(pBase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);
 +
 +
 +      /* what speed did we negotiate? */
 +      power = musb_readb(pBase, MGC_O_HDRC_POWER);
 +      musb->g.speed = (power & MGC_M_POWER_HSMODE)
 +                      ? USB_SPEED_HIGH : USB_SPEED_FULL;
 +
 +      /* start in USB_STATE_DEFAULT */
 +      musb->is_active = 1;
 +      musb->is_suspended = 0;
 +      MUSB_DEV_MODE(musb);
 +      musb->bAddress = 0;
 +      musb->ep0_state = MGC_END0_STAGE_SETUP;
 +
 +      musb->may_wakeup = 0;
 +      musb->g.b_hnp_enable = 0;
 +      musb->g.a_alt_hnp_support = 0;
 +      musb->g.a_hnp_support = 0;
 +
 +      /* Normal reset, as B-Device;
 +       * or else after HNP, as A-Device
 +       */
 +      if (devctl & MGC_M_DEVCTL_BDEVICE) {
 +              musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
 +              musb->g.is_a_peripheral = 0;
 +      } else if (is_otg_enabled(musb)) {
 +              musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
 +              musb->g.is_a_peripheral = 1;
 +      } else
 +              WARN_ON(1);
 +
 +      /* start with default limits on VBUS power draw */
 +      (void) musb_gadget_vbus_draw(&musb->g,
 +                      is_otg_enabled(musb) ? 8 : 100);
 +}
Simple merge
Simple merge
@@@ -15,22 -15,15 +15,17 @@@ objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OM
  objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
  objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
  
 +objs-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
  objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
  objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
 +objs-y$(CONFIG_MACH_OMAP_H2) += lcd_h2.o
  objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
- objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
  objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
+ objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
  objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
  objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
- objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
  objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
- objs-y$(CONFIG_MACH_OMAP_PERSEUS2) += lcd_p2.o
- objs-y$(CONFIG_MACH_OMAP_APOLLON) += lcd_apollon.o
- objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o
- objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
+ objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
  
  omapfb-objs := $(objs-yy)
  
@@@ -2,7 -2,7 +2,7 @@@
   * OMAP2 Remote Frame Buffer Interface support
   *
   * Copyright (C) 2005 Nokia Corporation
-- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
++ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
   *       Imre Deak <imre.deak@nokia.com>
   *
   * This program is free software; you can redistribute it and/or modify it
@@@ -2,7 -2,7 +2,7 @@@
   * OMAP1 Special OptimiSed Screen Interface support
   *
   * Copyright (C) 2004-2005 Nokia Corporation
-- * Author: Juha Yrjölä <juha.yrjola@nokia.com>
++ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the
Simple merge
Simple merge
Simple merge
  #define I2C_DRIVERID_KS0127   86      /* Samsung ks0127 video decoder */
  #define I2C_DRIVERID_TLV320AIC23B 87  /* TI TLV320AIC23B audio codec  */
  #define I2C_DRIVERID_ISL1208  88      /* Intersil ISL1208 RTC         */
- #define I2C_DRIVERID_WM8731           89      /* Wolfson WM8731 audio codec */
- #define I2C_DRIVERID_WM8750           90      /* Wolfson WM8750 audio codec */
- #define I2C_DRIVERID_WM8753           91      /* Wolfson WM8753 audio codec */
+ #define I2C_DRIVERID_WM8731   89      /* Wolfson WM8731 audio codec */
+ #define I2C_DRIVERID_WM8750   90      /* Wolfson WM8750 audio codec */
+ #define I2C_DRIVERID_WM8753   91      /* Wolfson WM8753 audio codec */
+ #define I2C_DRIVERID_LM4857   92      /* LM4857 Audio Amplifier */
  
 +#define I2C_DRIVERID_MISC     99      /* Whatever until sorted out    */
 +
  #define I2C_DRIVERID_I2CDEV   900
  #define I2C_DRIVERID_ARP        902    /* SMBus ARP Client              */
  #define I2C_DRIVERID_ALERT      903    /* SMBus Alert Responder Client  */
diff --cc kernel/printk.c
@@@ -476,8 -464,6 +479,9 @@@ static int __init printk_time_setup(cha
  
  __setup("time", printk_time_setup);
  
 +#endif
++module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 +
  __attribute__((weak)) unsigned long long printk_clock(void)
  {
        return sched_clock();
Simple merge
Simple merge