Merge branch 'sh/stable-updates'
author Paul Mundt <lethal@linux-sh.org>
Mon, 26 Apr 2010 07:08:27 +0000 (16:08 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Mon, 26 Apr 2010 07:08:27 +0000 (16:08 +0900)
Conflicts:
arch/sh/kernel/dwarf.c
drivers/dma/shdma.c

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/cpu-sh4/cpu/dma-register.h
arch/sh/kernel/dwarf.c
arch/sh/kernel/process.c
arch/sh/mm/pmb.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/dma/shdma.c
drivers/serial/sh-sci.c
drivers/sh/intc.c

diff --combined arch/sh/include/cpu-sh4/cpu/dma-register.h
@@@ -23,8 -23,7 +23,8 @@@
  #define CHCR_TS_HIGH_MASK     0
  #define CHCR_TS_HIGH_SHIFT    0
  #elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
 -      defined(CONFIG_CPU_SUBTYPE_SH7724)
 +      defined(CONFIG_CPU_SUBTYPE_SH7724) || \
 +      defined(CONFIG_CPU_SUBTYPE_SH7786)
  #define CHCR_TS_LOW_MASK      0x00000018
  #define CHCR_TS_LOW_SHIFT     3
  #define CHCR_TS_HIGH_MASK     0x00300000
@@@ -77,7 -76,7 +77,7 @@@ enum 
  }
  
  #define TS_INDEX2VAL(i)       ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
-                        ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))
+                        (((i) & 0xc) << CHCR_TS_HIGH_SHIFT))
  
  #else /* CONFIG_CPU_SH4A */
  
diff --combined arch/sh/kernel/dwarf.c
@@@ -22,7 -22,7 +22,8 @@@
  #include <linux/mm.h>
  #include <linux/elf.h>
  #include <linux/ftrace.h>
 +#include <linux/module.h>
+ #include <linux/slab.h>
  #include <asm/dwarf.h>
  #include <asm/unwinder.h>
  #include <asm/sections.h>
diff --combined arch/sh/kernel/process.c
@@@ -1,5 -1,6 +1,6 @@@
  #include <linux/mm.h>
  #include <linux/kernel.h>
+ #include <linux/slab.h>
  #include <linux/sched.h>
  
  struct kmem_cache *task_xstate_cachep = NULL;
@@@ -89,7 -90,7 +90,7 @@@ void arch_task_cache_init(void
  # define HAVE_SOFTFP  0
  #endif
  
 -void init_thread_xstate(void)
 +void __cpuinit init_thread_xstate(void)
  {
        if (boot_cpu_data.flags & CPU_HAS_FPU)
                xstate_size = sizeof(struct sh_fpu_hard_struct);
diff --combined arch/sh/mm/pmb.c
@@@ -15,7 -15,6 +15,6 @@@
  #include <linux/sysdev.h>
  #include <linux/cpu.h>
  #include <linux/module.h>
- #include <linux/slab.h>
  #include <linux/bitops.h>
  #include <linux/debugfs.h>
  #include <linux/fs.h>
@@@ -681,7 -680,7 +680,7 @@@ static void __init pmb_merge(struct pmb
        /*
         * The merged page size must be valid.
         */
 -      if (!pmb_size_valid(newsize))
 +      if (!depth || !pmb_size_valid(newsize))
                return;
  
        head->flags &= ~PMB_SZ_MASK;
diff --combined drivers/clocksource/sh_cmt.c
@@@ -29,6 -29,7 +29,7 @@@
  #include <linux/clocksource.h>
  #include <linux/clockchips.h>
  #include <linux/sh_timer.h>
+ #include <linux/slab.h>
  
  struct sh_cmt_priv {
        void __iomem *mapbase;
@@@ -149,12 -150,13 +150,12 @@@ static void sh_cmt_start_stop_ch(struc
  
  static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
  {
 -      struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        int ret;
  
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
 -              pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
 +              dev_err(&p->pdev->dev, "cannot enable clock\n");
                return ret;
        }
  
@@@ -277,7 -279,7 +278,7 @@@ static void sh_cmt_clock_event_program_
                        delay = 1;
  
                if (!delay)
 -                      pr_warning("sh_cmt: too long delay\n");
 +                      dev_warn(&p->pdev->dev, "too long delay\n");
  
        } while (delay);
  }
@@@ -287,7 -289,7 +288,7 @@@ static void sh_cmt_set_next(struct sh_c
        unsigned long flags;
  
        if (delta > p->max_match_value)
 -              pr_warning("sh_cmt: delta out of range\n");
 +              dev_warn(&p->pdev->dev, "delta out of range\n");
  
        spin_lock_irqsave(&p->lock, flags);
        p->next_match_value = delta;
@@@ -449,7 -451,7 +450,7 @@@ static int sh_cmt_register_clocksource(
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 -      pr_info("sh_cmt: %s used as clock source\n", cs->name);
 +      dev_info(&p->pdev->dev, "used as clock source\n");
        clocksource_register(cs);
        return 0;
  }
@@@ -495,11 -497,13 +496,11 @@@ static void sh_cmt_clock_event_mode(enu
  
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
 -              pr_info("sh_cmt: %s used for periodic clock events\n",
 -                      ced->name);
 +              dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_cmt_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
 -              pr_info("sh_cmt: %s used for oneshot clock events\n",
 -                      ced->name);
 +              dev_info(&p->pdev->dev, "used for oneshot clock events\n");
                sh_cmt_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
@@@ -540,7 -544,7 +541,7 @@@ static void sh_cmt_register_clockevent(
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;
  
 -      pr_info("sh_cmt: %s used for clock events\n", ced->name);
 +      dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
  }
  
@@@ -597,27 -601,22 +598,27 @@@ static int sh_cmt_setup(struct sh_cmt_p
        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
 -              pr_err("sh_cmt: failed to remap I/O memory\n");
 +              dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }
  
        /* request irq using setup_irq() (too early for request_irq()) */
 -      p->irqaction.name = cfg->name;
 +      p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_cmt_interrupt;
        p->irqaction.dev_id = p;
 -      p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
 +      p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
 +                           IRQF_IRQPOLL  | IRQF_NOBALANCING;
  
        /* get hold of clock */
 -      p->clk = clk_get(&p->pdev->dev, cfg->clk);
 +      p->clk = clk_get(&p->pdev->dev, "cmt_fck");
        if (IS_ERR(p->clk)) {
 -              pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
 -              ret = PTR_ERR(p->clk);
 -              goto err1;
 +              dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
 +              p->clk = clk_get(&p->pdev->dev, cfg->clk);
 +              if (IS_ERR(p->clk)) {
 +                      dev_err(&p->pdev->dev, "cannot get clock\n");
 +                      ret = PTR_ERR(p->clk);
 +                      goto err1;
 +              }
        }
  
        if (resource_size(res) == 6) {
                p->clear_bits = ~0xc000;
        }
  
 -      ret = sh_cmt_register(p, cfg->name,
 +      ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
                              cfg->clockevent_rating,
                              cfg->clocksource_rating);
        if (ret) {
 -              pr_err("sh_cmt: registration failed\n");
 +              dev_err(&p->pdev->dev, "registration failed\n");
                goto err1;
        }
  
        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
 -              pr_err("sh_cmt: failed to request irq %d\n", irq);
 +              dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
                goto err1;
        }
  
@@@ -655,10 -654,11 +656,10 @@@ err0
  static int __devinit sh_cmt_probe(struct platform_device *pdev)
  {
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
 -      struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
  
        if (p) {
 -              pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
 +              dev_info(&pdev->dev, "kept as earlytimer\n");
                return 0;
        }
  
diff --combined drivers/clocksource/sh_mtu2.c
@@@ -29,6 -29,7 +29,7 @@@
  #include <linux/err.h>
  #include <linux/clockchips.h>
  #include <linux/sh_timer.h>
+ #include <linux/slab.h>
  
  struct sh_mtu2_priv {
        void __iomem *mapbase;
@@@ -118,12 -119,13 +119,12 @@@ static void sh_mtu2_start_stop_ch(struc
  
  static int sh_mtu2_enable(struct sh_mtu2_priv *p)
  {
 -      struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        int ret;
  
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
 -              pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
 +              dev_err(&p->pdev->dev, "cannot enable clock\n");
                return ret;
        }
  
@@@ -192,7 -194,8 +193,7 @@@ static void sh_mtu2_clock_event_mode(en
  
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
 -              pr_info("sh_mtu2: %s used for periodic clock events\n",
 -                      ced->name);
 +              dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_mtu2_enable(p);
                break;
        case CLOCK_EVT_MODE_UNUSED:
@@@ -219,13 -222,13 +220,13 @@@ static void sh_mtu2_register_clockevent
        ced->cpumask = cpumask_of(0);
        ced->set_mode = sh_mtu2_clock_event_mode;
  
 -      pr_info("sh_mtu2: %s used for clock events\n", ced->name);
 +      dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
  
        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
 -              pr_err("sh_mtu2: failed to request irq %d\n",
 -                     p->irqaction.irq);
 +              dev_err(&p->pdev->dev, "failed to request irq %d\n",
 +                      p->irqaction.irq);
                return;
        }
  }
@@@ -271,32 -274,26 +272,32 @@@ static int sh_mtu2_setup(struct sh_mtu2
        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
 -              pr_err("sh_mtu2: failed to remap I/O memory\n");
 +              dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }
  
        /* setup data for setup_irq() (too early for request_irq()) */
 -      p->irqaction.name = cfg->name;
 +      p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_mtu2_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.irq = irq;
 -      p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
 +      p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
 +                           IRQF_IRQPOLL  | IRQF_NOBALANCING;
  
        /* get hold of clock */
 -      p->clk = clk_get(&p->pdev->dev, cfg->clk);
 +      p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
        if (IS_ERR(p->clk)) {
 -              pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
 -              ret = PTR_ERR(p->clk);
 -              goto err1;
 +              dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
 +              p->clk = clk_get(&p->pdev->dev, cfg->clk);
 +              if (IS_ERR(p->clk)) {
 +                      dev_err(&p->pdev->dev, "cannot get clock\n");
 +                      ret = PTR_ERR(p->clk);
 +                      goto err1;
 +              }
        }
  
 -      return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
 +      return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
 +                              cfg->clockevent_rating);
   err1:
        iounmap(p->mapbase);
   err0:
  static int __devinit sh_mtu2_probe(struct platform_device *pdev)
  {
        struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
 -      struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
  
        if (p) {
 -              pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
 +              dev_info(&pdev->dev, "kept as earlytimer\n");
                return 0;
        }
  
diff --combined drivers/clocksource/sh_tmu.c
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/clocksource.h>
  #include <linux/clockchips.h>
  #include <linux/sh_timer.h>
+ #include <linux/slab.h>
  
  struct sh_tmu_priv {
        void __iomem *mapbase;
@@@ -106,12 -107,13 +107,12 @@@ static void sh_tmu_start_stop_ch(struc
  
  static int sh_tmu_enable(struct sh_tmu_priv *p)
  {
 -      struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        int ret;
  
        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
 -              pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
 +              dev_err(&p->pdev->dev, "cannot enable clock\n");
                return ret;
        }
  
@@@ -227,7 -229,7 +228,7 @@@ static int sh_tmu_register_clocksource(
        cs->disable = sh_tmu_clocksource_disable;
        cs->mask = CLOCKSOURCE_MASK(32);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 -      pr_info("sh_tmu: %s used as clock source\n", cs->name);
 +      dev_info(&p->pdev->dev, "used as clock source\n");
        clocksource_register(cs);
        return 0;
  }
@@@ -275,11 -277,13 +276,11 @@@ static void sh_tmu_clock_event_mode(enu
  
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
 -              pr_info("sh_tmu: %s used for periodic clock events\n",
 -                      ced->name);
 +              dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_tmu_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
 -              pr_info("sh_tmu: %s used for oneshot clock events\n",
 -                      ced->name);
 +              dev_info(&p->pdev->dev, "used for oneshot clock events\n");
                sh_tmu_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_UNUSED:
@@@ -320,13 -324,13 +321,13 @@@ static void sh_tmu_register_clockevent(
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
  
 -      pr_info("sh_tmu: %s used for clock events\n", ced->name);
 +      dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
  
        ret = setup_irq(p->irqaction.irq, &p->irqaction);
        if (ret) {
 -              pr_err("sh_tmu: failed to request irq %d\n",
 -                     p->irqaction.irq);
 +              dev_err(&p->pdev->dev, "failed to request irq %d\n",
 +                      p->irqaction.irq);
                return;
        }
  }
@@@ -375,31 -379,26 +376,31 @@@ static int sh_tmu_setup(struct sh_tmu_p
        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
 -              pr_err("sh_tmu: failed to remap I/O memory\n");
 +              dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }
  
        /* setup data for setup_irq() (too early for request_irq()) */
 -      p->irqaction.name = cfg->name;
 +      p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_tmu_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.irq = irq;
 -      p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
 +      p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
 +                           IRQF_IRQPOLL  | IRQF_NOBALANCING;
  
        /* get hold of clock */
 -      p->clk = clk_get(&p->pdev->dev, cfg->clk);
 +      p->clk = clk_get(&p->pdev->dev, "tmu_fck");
        if (IS_ERR(p->clk)) {
 -              pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
 -              ret = PTR_ERR(p->clk);
 -              goto err1;
 +              dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
 +              p->clk = clk_get(&p->pdev->dev, cfg->clk);
 +              if (IS_ERR(p->clk)) {
 +                      dev_err(&p->pdev->dev, "cannot get clock\n");
 +                      ret = PTR_ERR(p->clk);
 +                      goto err1;
 +              }
        }
  
 -      return sh_tmu_register(p, cfg->name,
 +      return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
                               cfg->clockevent_rating,
                               cfg->clocksource_rating);
   err1:
  static int __devinit sh_tmu_probe(struct platform_device *pdev)
  {
        struct sh_tmu_priv *p = platform_get_drvdata(pdev);
 -      struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;
  
        if (p) {
 -              pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
 +              dev_info(&pdev->dev, "kept as earlytimer\n");
                return 0;
        }
  
diff --combined drivers/dma/shdma.c
  
  #include <linux/init.h>
  #include <linux/module.h>
+ #include <linux/slab.h>
  #include <linux/interrupt.h>
  #include <linux/dmaengine.h>
  #include <linux/delay.h>
  #include <linux/dma-mapping.h>
  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>
 -
 -#include <asm/dmaengine.h>
 +#include <linux/sh_dma.h>
  
  #include "shdma.h"
  
@@@ -43,7 -45,7 +44,7 @@@ enum sh_dmae_desc_status 
  #define LOG2_DEFAULT_XFER_SIZE        2
  
  /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
 -static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
 +static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
  
  static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
  
@@@ -188,7 -190,7 +189,7 @@@ static int dmae_set_dmars(struct sh_dma
        struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
                                                struct sh_dmae_device, common);
        struct sh_dmae_pdata *pdata = shdev->pdata;
 -      struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
 +      const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
        u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
        int shift = chan_pdata->dmars_bit;
  
@@@ -264,8 -266,8 +265,8 @@@ static struct sh_desc *sh_dmae_get_desc
        return NULL;
  }
  
 -static struct sh_dmae_slave_config *sh_dmae_find_slave(
 -      struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
 +static const struct sh_dmae_slave_config *sh_dmae_find_slave(
 +      struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
  {
        struct dma_device *dma_dev = sh_chan->common.device;
        struct sh_dmae_device *shdev = container_of(dma_dev,
        struct sh_dmae_pdata *pdata = shdev->pdata;
        int i;
  
 -      if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
 +      if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
                return NULL;
  
        for (i = 0; i < pdata->slave_num; i++)
 -              if (pdata->slave[i].slave_id == slave_id)
 +              if (pdata->slave[i].slave_id == param->slave_id)
                        return pdata->slave + i;
  
        return NULL;
@@@ -288,6 -290,7 +289,7 @@@ static int sh_dmae_alloc_chan_resources
        struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
        struct sh_desc *desc;
        struct sh_dmae_slave *param = chan->private;
+       int ret;
  
        pm_runtime_get_sync(sh_chan->dev);
  
         * never runs concurrently with itself or free_chan_resources.
         */
        if (param) {
 -              struct sh_dmae_slave_config *cfg;
 +              const struct sh_dmae_slave_config *cfg;
  
 -              cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
 +              cfg = sh_dmae_find_slave(sh_chan, param);
-               if (!cfg)
-                       return -EINVAL;
+               if (!cfg) {
+                       ret = -EINVAL;
+                       goto efindslave;
+               }
  
-               if (test_and_set_bit(param->slave_id, sh_dmae_slave_used))
-                       return -EBUSY;
+               if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
+                       ret = -EBUSY;
+                       goto etestused;
+               }
  
                param->config = cfg;
  
        }
        spin_unlock_bh(&sh_chan->desc_lock);
  
-       if (!sh_chan->descs_allocated)
-               pm_runtime_put(sh_chan->dev);
+       if (!sh_chan->descs_allocated) {
+               ret = -ENOMEM;
+               goto edescalloc;
+       }
  
        return sh_chan->descs_allocated;
+ edescalloc:
+       if (param)
+               clear_bit(param->slave_id, sh_dmae_slave_used);
+ etestused:
+ efindslave:
+       pm_runtime_put(sh_chan->dev);
+       return ret;
  }
  
  /*
@@@ -557,14 -574,12 +573,14 @@@ static struct dma_async_tx_descriptor *
  {
        struct sh_dmae_slave *param;
        struct sh_dmae_chan *sh_chan;
 +      dma_addr_t slave_addr;
  
        if (!chan)
                return NULL;
  
        sh_chan = to_sh_chan(chan);
        param = chan->private;
 +      slave_addr = param->config->addr;
  
        /* Someone calling slave DMA on a public channel? */
        if (!param || !sg_len) {
         * if (param != NULL), this is a successfully requested slave channel,
         * therefore param->config != NULL too.
         */
 -      return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
 +      return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
                               direction, flags);
  }
  
@@@ -858,7 -873,7 +874,7 @@@ static int __devinit sh_dmae_chan_probe
                                        int irq, unsigned long flags)
  {
        int err;
 -      struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
 +      const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
        struct platform_device *pdev = to_platform_device(shdev->common.dev);
        struct sh_dmae_chan *new_sh_chan;
  
diff --combined drivers/serial/sh-sci.c
@@@ -50,6 -50,7 +50,7 @@@
  #include <linux/list.h>
  #include <linux/dmaengine.h>
  #include <linux/scatterlist.h>
+ #include <linux/slab.h>
  
  #ifdef CONFIG_SUPERH
  #include <asm/sh_bios.h>
@@@ -82,16 -83,16 +83,16 @@@ struct sci_port 
  
        /* Interface clock */
        struct clk              *iclk;
 -      /* Data clock */
 -      struct clk              *dclk;
 +      /* Function clock */
 +      struct clk              *fclk;
  
        struct list_head        node;
        struct dma_chan                 *chan_tx;
        struct dma_chan                 *chan_rx;
  #ifdef CONFIG_SERIAL_SH_SCI_DMA
        struct device                   *dma_dev;
 -      enum sh_dmae_slave_chan_id      slave_tx;
 -      enum sh_dmae_slave_chan_id      slave_rx;
 +      unsigned int                    slave_tx;
 +      unsigned int                    slave_rx;
        struct dma_async_tx_descriptor  *desc_tx;
        struct dma_async_tx_descriptor  *desc_rx[2];
        dma_cookie_t                    cookie_tx;
        struct work_struct              work_tx;
        struct work_struct              work_rx;
        struct timer_list               rx_timer;
 +      unsigned int                    rx_timeout;
  #endif
  };
  
@@@ -674,22 -674,22 +675,22 @@@ static irqreturn_t sci_rx_interrupt(in
        struct sci_port *s = to_sci_port(port);
  
        if (s->chan_rx) {
 -              unsigned long tout;
                u16 scr = sci_in(port, SCSCR);
                u16 ssr = sci_in(port, SCxSR);
  
                /* Disable future Rx interrupts */
 -              sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
 +              if (port->type == PORT_SCIFA) {
 +                      disable_irq_nosync(irq);
 +                      scr |= 0x4000;
 +              } else {
 +                      scr &= ~SCI_CTRL_FLAGS_RIE;
 +              }
 +              sci_out(port, SCSCR, scr);
                /* Clear current interrupt */
                sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
 -              /* Calculate delay for 1.5 DMA buffers */
 -              tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
 -                      port->fifosize / 2;
 -              dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
 -                      tout * 1000 / HZ);
 -              if (tout < 2)
 -                      tout = 2;
 -              mod_timer(&s->rx_timer, jiffies + tout);
 +              dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
 +                      jiffies, s->rx_timeout);
 +              mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
  
                return IRQ_HANDLED;
        }
@@@ -799,7 -799,7 +800,7 @@@ static int sci_notifier(struct notifier
            (phase == CPUFREQ_RESUMECHANGE)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_for_each_entry(sci_port, &priv->ports, node)
 -                      sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
 +                      sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
                spin_unlock_irqrestore(&priv->lock, flags);
        }
  
@@@ -810,17 -810,21 +811,17 @@@ static void sci_clk_enable(struct uart_
  {
        struct sci_port *sci_port = to_sci_port(port);
  
 -      clk_enable(sci_port->dclk);
 -      sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
 -
 -      if (sci_port->iclk)
 -              clk_enable(sci_port->iclk);
 +      clk_enable(sci_port->iclk);
 +      sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
 +      clk_enable(sci_port->fclk);
  }
  
  static void sci_clk_disable(struct uart_port *port)
  {
        struct sci_port *sci_port = to_sci_port(port);
  
 -      if (sci_port->iclk)
 -              clk_disable(sci_port->iclk);
 -
 -      clk_disable(sci_port->dclk);
 +      clk_disable(sci_port->fclk);
 +      clk_disable(sci_port->iclk);
  }
  
  static int sci_request_irq(struct sci_port *port)
@@@ -909,26 -913,22 +910,26 @@@ static void sci_dma_tx_complete(void *a
  
        spin_lock_irqsave(&port->lock, flags);
  
 -      xmit->tail += s->sg_tx.length;
 +      xmit->tail += sg_dma_len(&s->sg_tx);
        xmit->tail &= UART_XMIT_SIZE - 1;
  
 -      port->icount.tx += s->sg_tx.length;
 +      port->icount.tx += sg_dma_len(&s->sg_tx);
  
        async_tx_ack(s->desc_tx);
        s->cookie_tx = -EINVAL;
        s->desc_tx = NULL;
  
 -      spin_unlock_irqrestore(&port->lock, flags);
 -
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
  
 -      if (uart_circ_chars_pending(xmit))
 +      if (!uart_circ_empty(xmit)) {
                schedule_work(&s->work_tx);
 +      } else if (port->type == PORT_SCIFA) {
 +              u16 ctrl = sci_in(port, SCSCR);
 +              sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
 +      }
 +
 +      spin_unlock_irqrestore(&port->lock, flags);
  }
  
  /* Locking: called with port lock held */
@@@ -972,13 -972,13 +973,13 @@@ static void sci_dma_rx_complete(void *a
        unsigned long flags;
        int count;
  
 -      dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
 +      dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
  
        spin_lock_irqsave(&port->lock, flags);
  
        count = sci_dma_rx_push(s, tty, s->buf_len_rx);
  
 -      mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
 +      mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
  
        spin_unlock_irqrestore(&port->lock, flags);
  
@@@ -1050,8 -1050,6 +1051,8 @@@ static void sci_submit_rx(struct sci_po
                        sci_rx_dma_release(s, true);
                        return;
                }
 +              dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
 +                      s->cookie_rx[i], i);
        }
  
        s->active_rx = s->cookie_rx[0];
@@@ -1109,10 -1107,10 +1110,10 @@@ static void work_fn_rx(struct work_stru
                return;
        }
  
 -      dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
 -              s->cookie_rx[new], new);
 -
        s->active_rx = s->cookie_rx[!new];
 +
 +      dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
 +              s->cookie_rx[new], new, s->active_rx);
  }
  
  static void work_fn_tx(struct work_struct *work)
         */
        spin_lock_irq(&port->lock);
        sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
 -      sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
 +      sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
                sg->offset;
 -      sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
 +      sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
                CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
 -      sg->dma_length = sg->length;
        spin_unlock_irq(&port->lock);
  
 -      BUG_ON(!sg->length);
 +      BUG_ON(!sg_dma_len(sg));
  
        desc = chan->device->device_prep_slave_sg(chan,
                        sg, s->sg_len_tx, DMA_TO_DEVICE,
  
  static void sci_start_tx(struct uart_port *port)
  {
 +      struct sci_port *s = to_sci_port(port);
        unsigned short ctrl;
  
  #ifdef CONFIG_SERIAL_SH_SCI_DMA
 -      struct sci_port *s = to_sci_port(port);
 -
 -      if (s->chan_tx) {
 -              if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
 -                      schedule_work(&s->work_tx);
 -
 -              return;
 +      if (port->type == PORT_SCIFA) {
 +              u16 new, scr = sci_in(port, SCSCR);
 +              if (s->chan_tx)
 +                      new = scr | 0x8000;
 +              else
 +                      new = scr & ~0x8000;
 +              if (new != scr)
 +                      sci_out(port, SCSCR, new);
        }
 +      if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
 +          s->cookie_tx < 0)
 +              schedule_work(&s->work_tx);
  #endif
 -
 -      /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
 -      ctrl = sci_in(port, SCSCR);
 -      ctrl |= SCI_CTRL_FLAGS_TIE;
 -      sci_out(port, SCSCR, ctrl);
 +      if (!s->chan_tx || port->type == PORT_SCIFA) {
 +              /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
 +              ctrl = sci_in(port, SCSCR);
 +              sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
 +      }
  }
  
  static void sci_stop_tx(struct uart_port *port)
  
        /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
        ctrl = sci_in(port, SCSCR);
 +      if (port->type == PORT_SCIFA)
 +              ctrl &= ~0x8000;
        ctrl &= ~SCI_CTRL_FLAGS_TIE;
        sci_out(port, SCSCR, ctrl);
  }
@@@ -1216,8 -1208,6 +1217,8 @@@ static void sci_start_rx(struct uart_po
  
        /* Set RIE (Receive Interrupt Enable) bit in SCSCR */
        ctrl |= sci_in(port, SCSCR);
 +      if (port->type == PORT_SCIFA)
 +              ctrl &= ~0x4000;
        sci_out(port, SCSCR, ctrl);
  }
  
@@@ -1227,8 -1217,6 +1228,8 @@@ static void sci_stop_rx(struct uart_por
  
        /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
        ctrl = sci_in(port, SCSCR);
 +      if (port->type == PORT_SCIFA)
 +              ctrl &= ~0x4000;
        ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
        sci_out(port, SCSCR, ctrl);
  }
@@@ -1263,12 -1251,8 +1264,12 @@@ static void rx_timer_fn(unsigned long a
  {
        struct sci_port *s = (struct sci_port *)arg;
        struct uart_port *port = &s->port;
 -
        u16 scr = sci_in(port, SCSCR);
 +
 +      if (port->type == PORT_SCIFA) {
 +              scr &= ~0x4000;
 +              enable_irq(s->irqs[1]);
 +      }
        sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
        dev_dbg(port->dev, "DMA Rx timed out\n");
        schedule_work(&s->work_rx);
@@@ -1355,7 -1339,8 +1356,7 @@@ static void sci_request_dma(struct uart
                        sg_init_table(sg, 1);
                        sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
                                    (int)buf[i] & ~PAGE_MASK);
 -                      sg->dma_address = dma[i];
 -                      sg->dma_length = sg->length;
 +                      sg_dma_address(sg) = dma[i];
                }
  
                INIT_WORK(&s->work_rx, work_fn_rx);
@@@ -1418,12 -1403,8 +1419,12 @@@ static void sci_shutdown(struct uart_po
  static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
                            struct ktermios *old)
  {
 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
 +      struct sci_port *s = to_sci_port(port);
 +#endif
        unsigned int status, baud, smr_val, max_baud;
        int t = -1;
 +      u16 scfcr = 0;
  
        /*
         * earlyprintk comes here early on with port->uartclk set to zero.
        sci_out(port, SCSCR, 0x00);     /* TE=0, RE=0, CKE1=0 */
  
        if (port->type != PORT_SCI)
 -              sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
 +              sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
  
        smr_val = sci_in(port, SCSMR) & 3;
        if ((termios->c_cflag & CSIZE) == CS7)
        }
  
        sci_init_pins(port, termios->c_cflag);
 -      sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
 +      sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
  
        sci_out(port, SCSCR, SCSCR_INIT(port));
  
 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
 +      /*
 +       * Calculate delay for 1.5 DMA buffers: see
 +       * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
 +       * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
 +       * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
 +       * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
 +       * sizes), but it has been found out experimentally, that this is not
 +       * enough: the driver too often needlessly runs on a DMA timeout. 20ms
 +       * as a minimum seem to work perfectly.
 +       */
 +      if (s->chan_rx) {
 +              s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
 +                      port->fifosize / 2;
 +              dev_dbg(port->dev,
 +                      "DMA Rx t-out %ums, tty t-out %u jiffies\n",
 +                      s->rx_timeout * 1000 / HZ, port->timeout);
 +              if (s->rx_timeout < msecs_to_jiffies(20))
 +                      s->rx_timeout = msecs_to_jiffies(20);
 +      }
 +#endif
 +
        if ((termios->c_cflag & CREAD) != 0)
                sci_start_rx(port);
  }
@@@ -1594,10 -1553,10 +1595,10 @@@ static struct uart_ops sci_uart_ops = 
  #endif
  };
  
 -static void __devinit sci_init_single(struct platform_device *dev,
 -                                    struct sci_port *sci_port,
 -                                    unsigned int index,
 -                                    struct plat_sci_port *p)
 +static int __devinit sci_init_single(struct platform_device *dev,
 +                                   struct sci_port *sci_port,
 +                                   unsigned int index,
 +                                   struct plat_sci_port *p)
  {
        struct uart_port *port = &sci_port->port;
  
        }
  
        if (dev) {
 -              sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
 -              sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
 +              sci_port->iclk = clk_get(&dev->dev, "sci_ick");
 +              if (IS_ERR(sci_port->iclk)) {
 +                      sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
 +                      if (IS_ERR(sci_port->iclk)) {
 +                              dev_err(&dev->dev, "can't get iclk\n");
 +                              return PTR_ERR(sci_port->iclk);
 +                      }
 +              }
 +
 +              /*
 +               * The function clock is optional, ignore it if we can't
 +               * find it.
 +               */
 +              sci_port->fclk = clk_get(&dev->dev, "sci_fck");
 +              if (IS_ERR(sci_port->fclk))
 +                      sci_port->fclk = NULL;
 +
                sci_port->enable = sci_clk_enable;
                sci_port->disable = sci_clk_disable;
                port->dev = &dev->dev;
  #endif
  
        memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
 +      return 0;
  }
  
  #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@@ -1811,11 -1754,8 +1812,11 @@@ static int sci_remove(struct platform_d
        cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
  
        spin_lock_irqsave(&priv->lock, flags);
 -      list_for_each_entry(p, &priv->ports, node)
 +      list_for_each_entry(p, &priv->ports, node) {
                uart_remove_one_port(&sci_uart_driver, &p->port);
 +              clk_put(p->iclk);
 +              clk_put(p->fclk);
 +      }
        spin_unlock_irqrestore(&priv->lock, flags);
  
        kfree(priv);
@@@ -1841,9 -1781,7 +1842,9 @@@ static int __devinit sci_probe_single(s
                return 0;
        }
  
 -      sci_init_single(dev, sciport, index, p);
 +      ret = sci_init_single(dev, sciport, index, p);
 +      if (ret)
 +              return ret;
  
        ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
        if (ret)
diff --combined drivers/sh/intc.c
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/irq.h>
  #include <linux/module.h>
  #include <linux/io.h>
+ #include <linux/slab.h>
  #include <linux/interrupt.h>
  #include <linux/sh_intc.h>
  #include <linux/sysdev.h>
@@@ -27,7 -28,6 +28,7 @@@
  #include <linux/topology.h>
  #include <linux/bitmap.h>
  #include <linux/cpumask.h>
 +#include <asm/sizes.h>
  
  #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@@ -45,12 -45,6 +46,12 @@@ struct intc_handle_int 
        unsigned long handle;
  };
  
 +struct intc_window {
 +      phys_addr_t phys;
 +      void __iomem *virt;
 +      unsigned long size;
 +};
 +
  struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
@@@ -64,8 -58,6 +65,8 @@@
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
 +      struct intc_window *window;
 +      unsigned int nr_windows;
        struct irq_chip chip;
  };
  
@@@ -95,12 -87,8 +96,12 @@@ static DEFINE_SPINLOCK(vector_lock)
  #define SMP_NR(d, x) 1
  #endif
  
 -static unsigned int intc_prio_level[NR_IRQS]; /* for now */
 +static unsigned int intc_prio_level[NR_IRQS]; /* for now */
 +static unsigned int default_prio_level = 2;   /* 2 - 16 */
  static unsigned long ack_handle[NR_IRQS];
 +#ifdef CONFIG_INTC_BALANCING
 +static unsigned long dist_handle[NR_IRQS];
 +#endif
  
  static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
  {
        return container_of(chip, struct intc_desc_int, chip);
  }
  
 +static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
 +                                     unsigned long address)
 +{
 +      struct intc_window *window;
 +      int k;
 +
 +      /* scan through physical windows and convert address */
 +      for (k = 0; k < d->nr_windows; k++) {
 +              window = d->window + k;
 +
 +              if (address < window->phys)
 +                      continue;
 +
 +              if (address >= (window->phys + window->size))
 +                      continue;
 +
 +              address -= window->phys;
 +              address += (unsigned long)window->virt;
 +
 +              return address;
 +      }
 +
 +      /* no windows defined, register must be 1:1 mapped virt:phys */
 +      return address;
 +}
 +
 +static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
 +{
 +      unsigned int k;
 +
 +      address = intc_phys_to_virt(d, address);
 +
 +      for (k = 0; k < d->nr_reg; k++) {
 +              if (d->reg[k] == address)
 +                      return k;
 +      }
 +
 +      BUG();
 +      return 0;
 +}
 +
  static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
@@@ -282,85 -229,6 +283,85 @@@ static void (*intc_disable_fns[])(unsig
        [MODE_PCLR_REG] = intc_mode_field,
  };
  
 +#ifdef CONFIG_INTC_BALANCING
 +static inline void intc_balancing_enable(unsigned int irq)
 +{
 +      struct intc_desc_int *d = get_intc_desc(irq);
 +      unsigned long handle = dist_handle[irq];
 +      unsigned long addr;
 +
 +      if (irq_balancing_disabled(irq) || !handle)
 +              return;
 +
 +      addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
 +      intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
 +}
 +
 +static inline void intc_balancing_disable(unsigned int irq)
 +{
 +      struct intc_desc_int *d = get_intc_desc(irq);
 +      unsigned long handle = dist_handle[irq];
 +      unsigned long addr;
 +
 +      if (irq_balancing_disabled(irq) || !handle)
 +              return;
 +
 +      addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
 +      intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
 +}
 +
 +static unsigned int intc_dist_data(struct intc_desc *desc,
 +                                 struct intc_desc_int *d,
 +                                 intc_enum enum_id)
 +{
 +      struct intc_mask_reg *mr = desc->hw.mask_regs;
 +      unsigned int i, j, fn, mode;
 +      unsigned long reg_e, reg_d;
 +
 +      for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
 +              mr = desc->hw.mask_regs + i;
 +
 +              /*
 +               * Skip this entry if there's no auto-distribution
 +               * register associated with it.
 +               */
 +              if (!mr->dist_reg)
 +                      continue;
 +
 +              for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
 +                      if (mr->enum_ids[j] != enum_id)
 +                              continue;
 +
 +                      fn = REG_FN_MODIFY_BASE;
 +                      mode = MODE_ENABLE_REG;
 +                      reg_e = mr->dist_reg;
 +                      reg_d = mr->dist_reg;
 +
 +                      fn += (mr->reg_width >> 3) - 1;
 +                      return _INTC_MK(fn, mode,
 +                                      intc_get_reg(d, reg_e),
 +                                      intc_get_reg(d, reg_d),
 +                                      1,
 +                                      (mr->reg_width - 1) - j);
 +              }
 +      }
 +
 +      /*
 +       * It's possible we've gotten here with no distribution options
 +       * available for the IRQ in question, so we just skip over those.
 +       */
 +      return 0;
 +}
 +#else
 +static inline void intc_balancing_enable(unsigned int irq)
 +{
 +}
 +
 +static inline void intc_balancing_disable(unsigned int irq)
 +{
 +}
 +#endif
 +
  static inline void _intc_enable(unsigned int irq, unsigned long handle)
  {
        struct intc_desc_int *d = get_intc_desc(irq);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
                                                    [_INTC_FN(handle)], irq);
        }
 +
 +      intc_balancing_enable(irq);
  }
  
  static void intc_enable(unsigned int irq)
  static void intc_disable(unsigned int irq)
  {
        struct intc_desc_int *d = get_intc_desc(irq);
 -      unsigned long handle = (unsigned long) get_irq_chip_data(irq);
 +      unsigned long handle = (unsigned long)get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;
  
 +      intc_balancing_disable(irq);
 +
        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
  #ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@@ -472,7 -336,8 +473,7 @@@ static void intc_mask_ack(unsigned int 
  
        intc_disable(irq);
  
 -      /* read register and write zero only to the assocaited bit */
 -
 +      /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
@@@ -501,8 -366,7 +502,8 @@@ static struct intc_handle_int *intc_fin
  {
        int i;
  
 -      /* this doesn't scale well, but...
 +      /*
 +       * this doesn't scale well, but...
         *
         * this function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to lookup the irq.
         */
 -
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;
@@@ -543,6 -408,7 +544,6 @@@ int intc_set_priority(unsigned int irq
                 * primary masking method is using intc_prio_level[irq]
                 * priority level will be set during next enable()
                 */
 -
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }
@@@ -581,6 -447,20 +582,6 @@@ static int intc_set_sense(unsigned int 
        return 0;
  }
  
 -static unsigned int __init intc_get_reg(struct intc_desc_int *d,
 -                               unsigned long address)
 -{
 -      unsigned int k;
 -
 -      for (k = 0; k < d->nr_reg; k++) {
 -              if (d->reg[k] == address)
 -                      return k;
 -      }
 -
 -      BUG();
 -      return 0;
 -}
 -
  static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
  {
@@@ -838,14 -718,13 +839,14 @@@ static void __init intc_register_irq(st
         */
        set_bit(irq, intc_irq_map);
  
 -      /* Prefer single interrupt source bitmap over other combinations:
 +      /*
 +       * Prefer single interrupt source bitmap over other combinations:
 +       *
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */
 -
        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);
  
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);
  
 -      /* set priority level
 +      /*
 +       * set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
 -      intc_prio_level[irq] = 2;
 +      intc_prio_level[irq] = default_prio_level;
  
        /* enable secondary masking method if present */
        if (data[!primary])
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */
 -
                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }
        if (desc->hw.ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);
  
 +#ifdef CONFIG_INTC_BALANCING
 +      if (desc->hw.mask_regs)
 +              dist_handle[irq] = intc_dist_data(desc, d, enum_id);
 +#endif
 +
  #ifdef CONFIG_ARM
        set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
  #endif
@@@ -927,8 -801,6 +928,8 @@@ static unsigned int __init save_reg(str
                                    unsigned int smp)
  {
        if (value) {
 +              value = intc_phys_to_virt(d, value);
 +
                d->reg[cnt] = value;
  #ifdef CONFIG_SMP
                d->smp[cnt] = smp;
@@@ -944,59 -816,25 +945,59 @@@ static void intc_redirect_irq(unsigned 
        generic_handle_irq((unsigned int)get_irq_data(irq));
  }
  
 -void __init register_intc_controller(struct intc_desc *desc)
 +int __init register_intc_controller(struct intc_desc *desc)
  {
        unsigned int i, k, smp;
        struct intc_hw_desc *hw = &desc->hw;
        struct intc_desc_int *d;
 +      struct resource *res;
 +
 +      pr_info("intc: Registered controller '%s' with %u IRQs\n",
 +              desc->name, hw->nr_vectors);
  
        d = kzalloc(sizeof(*d), GFP_NOWAIT);
 +      if (!d)
 +              goto err0;
  
        INIT_LIST_HEAD(&d->list);
        list_add(&d->list, &intc_list);
  
 +      if (desc->num_resources) {
 +              d->nr_windows = desc->num_resources;
 +              d->window = kzalloc(d->nr_windows * sizeof(*d->window),
 +                                  GFP_NOWAIT);
 +              if (!d->window)
 +                      goto err1;
 +
 +              for (k = 0; k < d->nr_windows; k++) {
 +                      res = desc->resource + k;
 +                      WARN_ON(resource_type(res) != IORESOURCE_MEM);
 +                      d->window[k].phys = res->start;
 +                      d->window[k].size = resource_size(res);
 +                      d->window[k].virt = ioremap_nocache(res->start,
 +                                                       resource_size(res));
 +                      if (!d->window[k].virt)
 +                              goto err2;
 +              }
 +      }
 +
        d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
 +#ifdef CONFIG_INTC_BALANCING
 +      if (d->nr_reg)
 +              d->nr_reg += hw->nr_mask_regs;
 +#endif
        d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
        d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
        d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
  
        d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
 +      if (!d->reg)
 +              goto err2;
 +
  #ifdef CONFIG_SMP
        d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
 +      if (!d->smp)
 +              goto err3;
  #endif
        k = 0;
  
                        smp = IS_SMP(hw->mask_regs[i]);
                        k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
 +#ifdef CONFIG_INTC_BALANCING
 +                      k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
 +#endif
                }
        }
  
        if (hw->prio_regs) {
                d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
                                  GFP_NOWAIT);
 +              if (!d->prio)
 +                      goto err4;
  
                for (i = 0; i < hw->nr_prio_regs; i++) {
                        smp = IS_SMP(hw->prio_regs[i]);
        if (hw->sense_regs) {
                d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
                                   GFP_NOWAIT);
 +              if (!d->sense)
 +                      goto err5;
  
                for (i = 0; i < hw->nr_sense_regs; i++)
                        k += save_reg(d, k, hw->sense_regs[i].reg, 0);
  
                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
 -                      pr_info("can't get irq_desc for %d\n", irq);
 +                      pr_err("can't get irq_desc for %d\n", irq);
                        continue;
                }
  
                         */
                        irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
                        if (unlikely(!irq_desc)) {
 -                              pr_info("can't get irq_desc for %d\n", irq2);
 +                              pr_err("can't get irq_desc for %d\n", irq2);
                                continue;
                        }
  
        /* enable bits matching force_enable after registering irqs */
        if (desc->force_enable)
                intc_enable_disable_enum(desc, d, desc->force_enable, 1);
 +
 +      return 0;
 +err5:
 +      kfree(d->prio);
 +err4:
 +#ifdef CONFIG_SMP
 +      kfree(d->smp);
 +err3:
 +#endif
 +      kfree(d->reg);
 +err2:
 +      for (k = 0; k < d->nr_windows; k++)
 +              if (d->window[k].virt)
 +                      iounmap(d->window[k].virt);
 +
 +      kfree(d->window);
 +err1:
 +      kfree(d);
 +err0:
 +      pr_err("unable to allocate INTC memory\n");
 +
 +      return -ENOMEM;
 +}
 +
 +#ifdef CONFIG_INTC_USERIMASK
 +static void __iomem *uimask;
 +
 +int register_intc_userimask(unsigned long addr)
 +{
 +      if (unlikely(uimask))
 +              return -EBUSY;
 +
 +      uimask = ioremap_nocache(addr, SZ_4K);
 +      if (unlikely(!uimask))
 +              return -ENOMEM;
 +
 +      pr_info("intc: userimask support registered for levels 0 -> %d\n",
 +              default_prio_level - 1);
 +
 +      return 0;
 +}
 +
 +static ssize_t
 +show_intc_userimask(struct sysdev_class *cls,
 +                  struct sysdev_class_attribute *attr, char *buf)
 +{
 +      return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
 +}
 +
 +static ssize_t
 +store_intc_userimask(struct sysdev_class *cls,
 +                   struct sysdev_class_attribute *attr,
 +                   const char *buf, size_t count)
 +{
 +      unsigned long level;
 +
 +      level = simple_strtoul(buf, NULL, 10);
 +
 +      /*
 +       * Minimal acceptable IRQ levels are in the 2 - 16 range, but
 +       * these are chomped so as to not interfere with normal IRQs.
 +       *
 +       * Level 1 is a special case on some CPUs in that it's not
 +       * directly settable, but given that USERIMASK cuts off below a
 +       * certain level, we don't care about this limitation here.
 +       * Level 0 on the other hand equates to user masking disabled.
 +       *
 +       * We use default_prio_level as a cut off so that only special
 +       * case opt-in IRQs can be mangled.
 +       */
 +      if (level >= default_prio_level)
 +              return -EINVAL;
 +
 +      __raw_writel(0xa5 << 24 | level << 4, uimask);
 +
 +      return count;
  }
  
 +static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
 +                       show_intc_userimask, store_intc_userimask);
 +#endif
 +
 +static ssize_t
 +show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
 +{
 +      struct intc_desc_int *d;
 +
 +      d = container_of(dev, struct intc_desc_int, sysdev);
 +
 +      return sprintf(buf, "%s\n", d->chip.name);
 +}
 +
 +static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
 +
  static int intc_suspend(struct sys_device *dev, pm_message_t state)
  {
        struct intc_desc_int *d;
@@@ -1264,28 -1003,19 +1265,28 @@@ static int __init register_intc_sysdevs
        int id = 0;
  
        error = sysdev_class_register(&intc_sysdev_class);
 +#ifdef CONFIG_INTC_USERIMASK
 +      if (!error && uimask)
 +              error = sysdev_class_create_file(&intc_sysdev_class,
 +                                               &attr_userimask);
 +#endif
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = id;
                        d->sysdev.cls = &intc_sysdev_class;
                        error = sysdev_register(&d->sysdev);
 +                      if (error == 0)
 +                              error = sysdev_create_file(&d->sysdev,
 +                                                         &attr_name);
                        if (error)
                                break;
 +
                        id++;
                }
        }
  
        if (error)
 -              pr_warning("intc: sysdev registration error\n");
 +              pr_err("intc: sysdev registration error\n");
  
        return error;
  }
@@@ -1318,7 -1048,7 +1319,7 @@@ unsigned int create_irq_nr(unsigned in
  
        desc = irq_to_desc_alloc_node(new, node);
        if (unlikely(!desc)) {
 -              pr_info("can't get irq_desc for %d\n", new);
 +              pr_err("can't get irq_desc for %d\n", new);
                goto out_unlock;
        }