Merge git://git.infradead.org/~dwmw2/mtd-2.6.31
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Sep 2009 21:57:04 +0000 (14:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Sep 2009 21:57:04 +0000 (14:57 -0700)
* git://git.infradead.org/~dwmw2/mtd-2.6.31:
  JFFS2: add missing verify buffer allocation/deallocation
  mtd: nftl: fix offset alignments
  mtd: nftl: write support is broken
  mtd: m25p80: fix null pointer dereference bug

55 files changed:
arch/ia64/kernel/dma-mapping.c
arch/ia64/lib/ip_fast_csum.S
arch/powerpc/kernel/power7-pmu.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/nmi.c
arch/sparc/prom/misc_64.c
arch/sparc/prom/printf.c
block/blk-sysfs.c
crypto/algapi.c
drivers/char/n_tty.c
drivers/char/pty.c
drivers/cpufreq/cpufreq.c
drivers/input/keyboard/atkbd.c
drivers/input/serio/i8042-x86ia64io.h
drivers/md/dm-exception-store.c
drivers/md/dm-exception-store.h
drivers/md/dm-log-userspace-base.c
drivers/md/dm-log-userspace-transfer.c
drivers/md/dm-log-userspace-transfer.h
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/net/gianfar.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/pci/iov.c
drivers/pci/pci.h
drivers/pci/setup-bus.c
drivers/pci/setup-res.c
fs/compat.c
fs/exec.c
fs/ext2/namei.c
fs/nilfs2/btnode.c
fs/ocfs2/aops.c
fs/ocfs2/dcache.c
fs/xfs/linux-2.6/xfs_ioctl32.c
include/crypto/algapi.h
include/crypto/internal/skcipher.h
include/linux/binfmts.h
include/linux/device-mapper.h
include/linux/dm-log-userspace.h
include/linux/workqueue.h
include/net/pkt_sched.h
kernel/perf_counter.c
mm/nommu.c
mm/page_alloc.c
mm/percpu.c
mm/slub.c
net/core/sock.c
net/sched/sch_api.c
net/sched/sch_cbq.c
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/oxygen_pcm.c

index 39a3cd0..f2c1600 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops);
 
 static int __init dma_init(void)
 {
-       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+       dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+       return 0;
 }
 fs_initcall(dma_init);
 
index 1f86aeb..620d9dc 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -96,20 +96,22 @@ END(ip_fast_csum)
 GLOBAL_ENTRY(csum_ipv6_magic)
        ld4     r20=[in0],4
        ld4     r21=[in1],4
-       dep     r15=in3,in2,32,16
+       zxt4    in2=in2
        ;;
        ld4     r22=[in0],4
        ld4     r23=[in1],4
-       mux1    r15=r15,@rev
+       dep     r15=in3,in2,32,16
        ;;
        ld4     r24=[in0],4
        ld4     r25=[in1],4
-       shr.u   r15=r15,16
+       mux1    r15=r15,@rev
        add     r16=r20,r21
        add     r17=r22,r23
+       zxt4    in4=in4
        ;;
        ld4     r26=[in0],4
        ld4     r27=[in1],4
+       shr.u   r15=r15,16
        add     r18=r24,r25
        add     r8=r16,r17
        ;;
index 388cf57..018d094 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -317,7 +317,7 @@ static int power7_generic_events[] = {
  */
 static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
-               [C(OP_READ)] = {        0x400f0,        0xc880  },
+               [C(OP_READ)] = {        0xc880,         0x400f0 },
                [C(OP_WRITE)] = {       0,              0x300f0 },
                [C(OP_PREFETCH)] = {    0xd8b8,         0       },
        },
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
                [C(OP_PREFETCH)] = {    0x408a,         0       },
        },
        [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
-               [C(OP_READ)] = {        0x6080,         0x6084  },
-               [C(OP_WRITE)] = {       0x6082,         0x6086  },
+               [C(OP_READ)] = {        0x16080,        0x26080 },
+               [C(OP_WRITE)] = {       0x16082,        0x26082 },
                [C(OP_PREFETCH)] = {    0,              0       },
        },
        [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
index f0ee790..8daab33 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;
index 2c0cc72..b75bf50 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -103,7 +103,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
        }
        if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                local_inc(&__get_cpu_var(alert_counter));
-               if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+               if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                regs, panic_on_timeout);
        } else {
index eedffb4..39fc6af 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -88,7 +88,7 @@ void prom_cmdline(void)
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
-void prom_halt(void)
+void notrace prom_halt(void)
 {
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
index 660943e..ca86926 100644
--- a/arch/sparc/prom/printf.c
+++ b/arch/sparc/prom/printf.c
  */
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
 static char ppbuf[1024];
 
-void
-prom_write(const char *buf, unsigned int n)
+void notrace prom_write(const char *buf, unsigned int n)
 {
        char ch;
 
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
        }
 }
 
-void
-prom_printf(const char *fmt, ...)
+void notrace prom_printf(const char *fmt, ...)
 {
        va_list args;
        int i;
index 418d636..d3aa2aa 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -133,7 +133,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                return -EINVAL;
 
        spin_lock_irq(q->queue_lock);
-       blk_queue_max_sectors(q, max_sectors_kb << 1);
+       q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);
 
        return ret;
index 56c62e2..df0863d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -692,7 +692,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
 {
        struct list_head *request;
 
@@ -707,7 +707,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
        request = queue->list.next;
        list_del(request);
 
-       return list_entry(request, struct crypto_async_request, list);
+       return (char *)list_entry(request, struct crypto_async_request, list) -
+              offset;
+}
+EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
+
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+{
+       return __crypto_dequeue_request(queue, 0);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
 
index 973be2f..4e28b35 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
                        if (space < 2)
                                return -1;
                        tty->canon_column = tty->column = 0;
-                       tty_put_char(tty, '\r');
-                       tty_put_char(tty, c);
+                       tty->ops->write(tty, "\r\n", 2);
                        return 2;
                }
                tty->canon_column = tty->column;
index d083c73..b33d668 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
  *     the other side of the pty/tty pair.
  */
 
-static int pty_write(struct tty_struct *tty, const unsigned char *buf,
-                                                               int count)
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
        struct tty_struct *to = tty->link;
-       int c;
 
        if (tty->stopped)
                return 0;
 
-       /* This isn't locked but our 8K is quite sloppy so no
-          big deal */
-
-       c = pty_space(to);
-       if (c > count)
-               c = count;
        if (c > 0) {
                /* Stuff the data into the input queue of the other end */
                c = tty_insert_flip_string(to, buf, c);
index fd69086..2968ed6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
        int ret = 0;
 
-#ifdef __powerpc__
        int cpu = sysdev->id;
-       unsigned int cur_freq = 0;
        struct cpufreq_policy *cpu_policy;
 
        dprintk("suspending cpu %u\n", cpu);
 
-       /*
-        * This whole bogosity is here because Powerbooks are made of fail.
-        * No sane platform should need any of the code below to be run.
-        * (it's entirely the wrong thing to do, as driver->get may
-        *  reenable interrupts on some architectures).
-        */
-
        if (!cpu_online(cpu))
                return 0;
 
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(cpu_policy, pmsg);
-               if (ret) {
+               if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", cpu_policy->cpu);
-                       goto out;
-               }
-       }
-
-       if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
-               goto out;
-
-       if (cpufreq_driver->get)
-               cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-       if (!cur_freq || !cpu_policy->cur) {
-               printk(KERN_ERR "cpufreq: suspend failed to assert current "
-                      "frequency is what timing core thinks it is.\n");
-               goto out;
-       }
-
-       if (unlikely(cur_freq != cpu_policy->cur)) {
-               struct cpufreq_freqs freqs;
-
-               if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-                       dprintk("Warning: CPU frequency is %u, "
-                              "cpufreq assumed %u kHz.\n",
-                              cur_freq, cpu_policy->cur);
-
-               freqs.cpu = cpu;
-               freqs.old = cpu_policy->cur;
-               freqs.new = cur_freq;
-
-               srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-                                   CPUFREQ_SUSPENDCHANGE, &freqs);
-               adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
-
-               cpu_policy->cur = cur_freq;
        }
 
 out:
        cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
        return ret;
 }
 
@@ -1330,24 +1287,21 @@ out:
  *     cpufreq_resume -  restore proper CPU frequency handling after resume
  *
  *     1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *     2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
- *     3.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *         restored.
+ *     2.) schedule call cpufreq_update_policy() ASAP as interrupts are
+ *         restored. It will verify that the current freq is in sync with
+ *         what we believe it to be. This is a bit later than when it
+ *         should be, but nonethteless it's better than calling
+ *         cpufreq_driver->get() here which might re-enable interrupts...
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
        int ret = 0;
 
-#ifdef __powerpc__
        int cpu = sysdev->id;
        struct cpufreq_policy *cpu_policy;
 
        dprintk("resuming cpu %u\n", cpu);
 
-       /* As with the ->suspend method, all the code below is
-        * only necessary because Powerbooks suck.
-        * See commit 42d4dc3f4e1e for jokes. */
-
        if (!cpu_online(cpu))
                return 0;
 
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
                }
        }
 
-       if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
-               unsigned int cur_freq = 0;
-
-               if (cpufreq_driver->get)
-                       cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-               if (!cur_freq || !cpu_policy->cur) {
-                       printk(KERN_ERR "cpufreq: resume failed to assert "
-                                       "current frequency is what timing core "
-                                       "thinks it is.\n");
-                       goto out;
-               }
-
-               if (unlikely(cur_freq != cpu_policy->cur)) {
-                       struct cpufreq_freqs freqs;
-
-                       if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-                               dprintk("Warning: CPU frequency "
-                                      "is %u, cpufreq assumed %u kHz.\n",
-                                      cur_freq, cpu_policy->cur);
-
-                       freqs.cpu = cpu;
-                       freqs.old = cpu_policy->cur;
-                       freqs.new = cur_freq;
-
-                       srcu_notifier_call_chain(
-                                       &cpufreq_transition_notifier_list,
-                                       CPUFREQ_RESUMECHANGE, &freqs);
-                       adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
-
-                       cpu_policy->cur = cur_freq;
-               }
-       }
-
-out:
        schedule_work(&cpu_policy->update);
+
 fail:
        cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
        return ret;
 }
 
index 95fe045..6c6a09b 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -879,6 +879,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
        0xae, 0xb0, -1U
 };
 
+/*
+ * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
+ * release for their volume buttons
+ */
+static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
+       0xae, 0xb0, -1U
+};
+
 /*
  * Samsung NC10,NC20 with Fn+F? key release not working
  */
@@ -1536,6 +1544,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
                .callback = atkbd_setup_forced_release,
                .driver_data = atkbd_hp_zv6100_forced_release_keys,
        },
+       {
+               .ident = "HP Presario R4000",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
+               },
+               .callback = atkbd_setup_forced_release,
+               .driver_data = atkbd_hp_r4000_forced_release_keys,
+       },
+       {
+               .ident = "HP Presario R4100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
+               },
+               .callback = atkbd_setup_forced_release,
+               .driver_data = atkbd_hp_r4000_forced_release_keys,
+       },
+       {
+               .ident = "HP Presario R4200",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
+               },
+               .callback = atkbd_setup_forced_release,
+               .driver_data = atkbd_hp_r4000_forced_release_keys,
+       },
        {
                .ident = "Inventec Symphony",
                .matches = {
index ae04d8a..ccbf23e 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
                },
        },
+       {
+               .ident = "Acer Aspire 5536",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+               },
+       },
        { }
 };
 
index 3710ff8..556acff 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
         */
        chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
 
+       return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+                                                error);
+}
+
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+                                     unsigned long chunk_size_ulong,
+                                     char **error)
+{
        /* Check chunk_size is a power of 2 */
        if (!is_power_of_2(chunk_size_ulong)) {
                *error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
                return -EINVAL;
        }
 
+       if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+               *error = "Chunk size is too high";
+               return -EINVAL;
+       }
+
        store->chunk_size = chunk_size_ulong;
        store->chunk_mask = chunk_size_ulong - 1;
        store->chunk_shift = ffs(chunk_size_ulong) - 1;
index 2442c8c..812c718 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
 int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+                                     unsigned long chunk_size_ulong,
+                                     char **error);
+
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
                              unsigned *args_used,
                              struct dm_exception_store **store);
index e69b965..6e186b1 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
        struct dm_target *ti;
        uint32_t region_size;
        region_t region_count;
+       uint64_t luid;
        char uuid[DM_UUID_LEN];
 
        char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
         * restored.
         */
 retry:
-       r = dm_consult_userspace(uuid, request_type, data,
+       r = dm_consult_userspace(uuid, lc->luid, request_type, data,
                                 data_size, rdata, rdata_size);
 
        if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(2*HZ);
                DMWARN("Attempting to contact userspace log server...");
-               r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+               r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
+                                        lc->usr_argv_str,
                                         strlen(lc->usr_argv_str) + 1,
                                         NULL, NULL);
                if (!r)
                        break;
        }
        DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
-       r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+       r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
                                 0, NULL, NULL);
        if (!r)
                goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
                return -ENOMEM;
        }
 
-       for (i = 0, str_size = 0; i < argc; i++)
-               str_size += sprintf(str + str_size, "%s ", argv[i]);
-       str_size += sprintf(str + str_size, "%llu",
-                           (unsigned long long)ti->len);
+       str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
+       for (i = 0; i < argc; i++)
+               str_size += sprintf(str + str_size, " %s", argv[i]);
 
        *ctr_str = str;
        return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
                return -ENOMEM;
        }
 
+       /* The ptr value is sufficient for local unique id */
+       lc->luid = (uint64_t)lc;
+
        lc->ti = ti;
 
        if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
        }
 
        /* Send table string */
-       r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+       r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
                                 ctr_str, str_size, NULL, NULL);
 
        if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 
        /* Since the region size does not change, get it now */
        rdata_size = sizeof(rdata);
-       r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+       r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
                                 NULL, 0, (char *)&rdata, &rdata_size);
 
        if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
        int r;
        struct log_c *lc = log->context;
 
-       r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+       r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
                                 NULL, 0,
                                 NULL, NULL);
 
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
        int r;
        struct log_c *lc = log->context;
 
-       r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+       r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
                                 NULL, 0,
                                 NULL, NULL);
 
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
        int r;
        struct log_c *lc = log->context;
 
-       r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+       r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
                                 NULL, 0,
                                 NULL, NULL);
 
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
        struct log_c *lc = log->context;
 
        lc->in_sync_hint = 0;
-       r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+       r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
                                 NULL, 0,
                                 NULL, NULL);
 
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
                            char *result, unsigned maxlen)
 {
        int r = 0;
+       char *table_args;
        size_t sz = (size_t)maxlen;
        struct log_c *lc = log->context;
 
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
                break;
        case STATUSTYPE_TABLE:
                sz = 0;
-               DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
-                      lc->uuid, lc->usr_argv_str);
+               table_args = strstr(lc->usr_argv_str, " ");
+               BUG_ON(!table_args); /* There will always be a ' ' */
+               table_args++;
+
+               DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
+                      lc->uuid, table_args);
                break;
        }
        return (r) ? 0 : (int)sz;
index 8ce74d9..ba0edad 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
 
 /**
  * dm_consult_userspace
- * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
+ * @luid: log's local unique identifier
  * @request_type:  found in include/linux/dm-log-userspace.h
  * @data: data to tx to the server
  * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
  *
  * Returns: 0 on success, -EXXX on failure
  **/
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
                         char *data, size_t data_size,
                         char *rdata, size_t *rdata_size)
 {
@@ -190,6 +191,7 @@ resend:
 
        memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
        memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+       tfr->luid = luid;
        tfr->seq = dm_ulog_seq++;
 
        /*
index c26d8e4..04ee874 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
 
 int dm_ulog_tfr_init(void);
 void dm_ulog_tfr_exit(void);
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
                         char *data, size_t data_size,
                         char *rdata, size_t *rdata_size);
 
index 9726577..33f179e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
         */
        dm_rh_inc_pending(ms->rh, &sync);
        dm_rh_inc_pending(ms->rh, &nosync);
-       ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
+
+       /*
+        * If the flush fails on a previous call and succeeds here,
+        * we must not reset the log_failure variable.  We need
+        * userspace interaction to do that.
+        */
+       ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
 
        /*
         * Dispatch io.
index 6e3fe4f..d5b2e08 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -105,6 +105,13 @@ struct pstore {
         */
        void *zero_area;
 
+       /*
+        * An area used for header. The header can be written
+        * concurrently with metadata (when invalidating the snapshot),
+        * so it needs a separate buffer.
+        */
+       void *header_area;
+
        /*
         * Used to keep track of which metadata area the data in
         * 'chunk' refers to.
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
         */
        ps->area = vmalloc(len);
        if (!ps->area)
-               return r;
+               goto err_area;
 
        ps->zero_area = vmalloc(len);
-       if (!ps->zero_area) {
-               vfree(ps->area);
-               return r;
-       }
+       if (!ps->zero_area)
+               goto err_zero_area;
        memset(ps->zero_area, 0, len);
 
+       ps->header_area = vmalloc(len);
+       if (!ps->header_area)
+               goto err_header_area;
+
        return 0;
+
+err_header_area:
+       vfree(ps->zero_area);
+
+err_zero_area:
+       vfree(ps->area);
+
+err_area:
+       return r;
 }
 
 static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
        if (ps->zero_area)
                vfree(ps->zero_area);
        ps->zero_area = NULL;
+
+       if (ps->header_area)
+               vfree(ps->header_area);
+       ps->header_area = NULL;
 }
 
 struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+                   int metadata)
 {
        struct dm_io_region where = {
                .bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .mem.type = DM_IO_VMA,
-               .mem.ptr.vma = ps->area,
+               .mem.ptr.vma = area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
 
        chunk = area_location(ps, ps->current_area);
 
-       r = chunk_io(ps, chunk, rw, 0);
+       r = chunk_io(ps, ps->area, chunk, rw, 0);
        if (r)
                return r;
 
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
 
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-       struct dm_io_region where = {
-               .bdev = ps->store->cow->bdev,
-               .sector = ps->store->chunk_size * area_location(ps, area),
-               .count = ps->store->chunk_size,
-       };
-       struct dm_io_request io_req = {
-               .bi_rw = WRITE,
-               .mem.type = DM_IO_VMA,
-               .mem.ptr.vma = ps->zero_area,
-               .client = ps->io_client,
-               .notify.fn = NULL,
-       };
-
-       return dm_io(&io_req, 1, &where, NULL);
+       return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        struct disk_header *dh;
        chunk_t chunk_size;
        int chunk_size_supplied = 1;
+       char *chunk_err;
 
        /*
         * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        if (r)
                return r;
 
-       r = chunk_io(ps, 0, READ, 1);
+       r = chunk_io(ps, ps->header_area, 0, READ, 1);
        if (r)
                goto bad;
 
-       dh = (struct disk_header *) ps->area;
+       dh = ps->header_area;
 
        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);
 
-       if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
+       if (ps->store->chunk_size == chunk_size)
                return 0;
 
-       DMWARN("chunk size %llu in device metadata overrides "
-              "table chunk size of %llu.",
-              (unsigned long long)chunk_size,
-              (unsigned long long)ps->store->chunk_size);
+       if (chunk_size_supplied)
+               DMWARN("chunk size %llu in device metadata overrides "
+                      "table chunk size of %llu.",
+                      (unsigned long long)chunk_size,
+                      (unsigned long long)ps->store->chunk_size);
 
        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);
 
-       ps->store->chunk_size = chunk_size;
-       ps->store->chunk_mask = chunk_size - 1;
-       ps->store->chunk_shift = ffs(chunk_size) - 1;
+       r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+                                             &chunk_err);
+       if (r) {
+               DMERR("invalid on-disk chunk size %llu: %s.",
+                     (unsigned long long)chunk_size, chunk_err);
+               return r;
+       }
 
        r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
                                ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
 {
        struct disk_header *dh;
 
-       memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+       memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 
-       dh = (struct disk_header *) ps->area;
+       dh = ps->header_area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-       return chunk_io(ps, 0, WRITE, 1);
+       return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }
 
 /*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
+       ps->zero_area = NULL;
+       ps->header_area = NULL;
        ps->next_free = 2;      /* skipping the header and first area */
        ps->current_committed = 0;
 
index d573165..57f1bf7 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
        return 0;
 }
 
+static int snapshot_iterate_devices(struct dm_target *ti,
+                                   iterate_devices_callout_fn fn, void *data)
+{
+       struct dm_snapshot *snap = ti->private;
+
+       return fn(ti, snap->origin, 0, ti->len, data);
+}
+
+
 /*-----------------------------------------------------------------
  * Origin methods
  *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
        return 0;
 }
 
+static int origin_iterate_devices(struct dm_target *ti,
+                                 iterate_devices_callout_fn fn, void *data)
+{
+       struct dm_dev *dev = ti->private;
+
+       return fn(ti, dev, 0, ti->len, data);
+}
+
 static struct target_type origin_target = {
        .name    = "snapshot-origin",
-       .version = {1, 6, 0},
+       .version = {1, 7, 0},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
+       .iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
        .name    = "snapshot",
-       .version = {1, 6, 0},
+       .version = {1, 7, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
        .end_io  = snapshot_end_io,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
+       .iterate_devices = snapshot_iterate_devices,
 };
 
 static int __init dm_snapshot_init(void)
index 4e0e593..3e563d2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
        return ret;
 }
 
+static void stripe_io_hints(struct dm_target *ti,
+                           struct queue_limits *limits)
+{
+       struct stripe_c *sc = ti->private;
+       unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+
+       blk_limits_io_min(limits, chunk_size);
+       limits->io_opt = chunk_size * sc->stripes;
+}
+
 static struct target_type stripe_target = {
        .name   = "striped",
-       .version = {1, 2, 0},
+       .version = {1, 3, 0},
        .module = THIS_MODULE,
        .ctr    = stripe_ctr,
        .dtr    = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
        .end_io = stripe_end_io,
        .status = stripe_status,
        .iterate_devices = stripe_iterate_devices,
+       .io_hints = stripe_io_hints,
 };
 
 int __init dm_stripe_init(void)
index d952b34..1a6cb3c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 }
 
 /*
- * If possible, this checks an area of a destination device is valid.
+ * If possible, this checks an area of a destination device is invalid.
  */
-static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-                               sector_t start, sector_t len, void *data)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+                                 sector_t start, sector_t len, void *data)
 {
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
        char b[BDEVNAME_SIZE];
 
        if (!dev_size)
-               return 1;
+               return 0;
 
        if ((start >= dev_size) || (start + len > dev_size)) {
-               DMWARN("%s: %s too small for target",
-                      dm_device_name(ti->table->md), bdevname(bdev, b));
-               return 0;
+               DMWARN("%s: %s too small for target: "
+                      "start=%llu, len=%llu, dev_size=%llu",
+                      dm_device_name(ti->table->md), bdevname(bdev, b),
+                      (unsigned long long)start,
+                      (unsigned long long)len,
+                      (unsigned long long)dev_size);
+               return 1;
        }
 
        if (logical_block_size_sectors <= 1)
-               return 1;
+               return 0;
 
        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
-                      "logical block size %hu of %s",
+                      "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
-               return 0;
+               return 1;
        }
 
        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
-                      "logical block size %hu of %s",
+                      "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
-               return 0;
+               return 1;
        }
 
-       return 1;
+       return 0;
 }
 
 /*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
        }
 
        if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-               DMWARN("%s: target device %s is misaligned",
-                      dm_device_name(ti->table->md), bdevname(bdev, b));
+               DMWARN("%s: target device %s is misaligned: "
+                      "physical_block_size=%u, logical_block_size=%u, "
+                      "alignment_offset=%u, start=%llu",
+                      dm_device_name(ti->table->md), bdevname(bdev, b),
+                      q->limits.physical_block_size,
+                      q->limits.logical_block_size,
+                      q->limits.alignment_offset,
+                      (unsigned long long) start << 9);
+
 
        /*
         * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 
        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
-                      "not aligned to h/w logical block size %hu",
+                      "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
                ti->type->iterate_devices(ti, dm_set_device_limits,
                                          &ti_limits);
 
+               /* Set I/O hints portion of queue limits */
+               if (ti->type->io_hints)
+                       ti->type->io_hints(ti, &ti_limits);
+
                /*
                 * Check each device area is consistent with the target's
                 * overall queue limits.
                 */
-               if (!ti->type->iterate_devices(ti, device_area_is_valid,
-                                              &ti_limits))
+               if (ti->type->iterate_devices(ti, device_area_is_invalid,
+                                             &ti_limits))
                        return -EINVAL;
 
 combine_limits:
index 8a311ea..b4845b1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
        dm_put(md);
 }
 
+static void free_rq_clone(struct request *clone)
+{
+       struct dm_rq_target_io *tio = clone->end_io_data;
+
+       blk_rq_unprep_clone(clone);
+       free_rq_tio(tio);
+}
+
 static void dm_unprep_request(struct request *rq)
 {
        struct request *clone = rq->special;
-       struct dm_rq_target_io *tio = clone->end_io_data;
 
        rq->special = NULL;
        rq->cmd_flags &= ~REQ_DONTPREP;
 
-       blk_rq_unprep_clone(clone);
-       free_rq_tio(tio);
+       free_rq_clone(clone);
 }
 
 /*
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error)
                        rq->sense_len = clone->sense_len;
        }
 
-       BUG_ON(clone->bio);
-       free_rq_tio(tio);
+       free_rq_clone(clone);
 
        blk_end_request_all(rq, error);
 
index e212f2c..24f7ca5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -491,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev)
 
        dev_set_drvdata(&ofdev->dev, NULL);
 
+       unregister_netdev(dev);
        iounmap(priv->regs);
        free_netdev(priv->ndev);
 
index 6dcac73..f593fbb 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
        return 0;
 }
 
-static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
-                                u32 src_phys, u32 dest_address, u32 length)
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+                                int nr, u32 dest_address, u32 len)
 {
-       u32 bytes_left = length;
-       u32 src_offset = 0;
-       u32 dest_offset = 0;
-       int status = 0;
+       int ret, i;
+       u32 size;
+
        IPW_DEBUG_FW(">> \n");
-       IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
-                         src_phys, dest_address, length);
-       while (bytes_left > CB_MAX_LENGTH) {
-               status = ipw_fw_dma_add_command_block(priv,
-                                                     src_phys + src_offset,
-                                                     dest_address +
-                                                     dest_offset,
-                                                     CB_MAX_LENGTH, 0, 0);
-               if (status) {
+       IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+                         nr, dest_address, len);
+
+       for (i = 0; i < nr; i++) {
+               size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+               ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+                                                  dest_address +
+                                                  i * CB_MAX_LENGTH, size,
+                                                  0, 0);
+               if (ret) {
                        IPW_DEBUG_FW_INFO(": Failed\n");
                        return -1;
                } else
                        IPW_DEBUG_FW_INFO(": Added new cb\n");
-
-               src_offset += CB_MAX_LENGTH;
-               dest_offset += CB_MAX_LENGTH;
-               bytes_left -= CB_MAX_LENGTH;
-       }
-
-       /* add the buffer tail */
-       if (bytes_left > 0) {
-               status =
-                   ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
-                                                dest_address + dest_offset,
-                                                bytes_left, 0, 0);
-               if (status) {
-                       IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
-                       return -1;
-               } else
-                       IPW_DEBUG_FW_INFO
-                           (": Adding new cb - the buffer tail\n");
        }
 
        IPW_DEBUG_FW("<< \n");
@@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
 
 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
 {
-       int rc = -1;
+       int ret = -1;
        int offset = 0;
        struct fw_chunk *chunk;
-       dma_addr_t shared_phys;
-       u8 *shared_virt;
+       int total_nr = 0;
+       int i;
+       struct pci_pool *pool;
+       u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
+       dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
 
        IPW_DEBUG_TRACE("<< : \n");
-       shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
 
-       if (!shared_virt)
+       pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
+       if (!pool) {
+               IPW_ERROR("pci_pool_create failed\n");
                return -ENOMEM;
-
-       memmove(shared_virt, data, len);
+       }
 
        /* Start the Dma */
-       rc = ipw_fw_dma_enable(priv);
+       ret = ipw_fw_dma_enable(priv);
 
        /* the DMA is already ready this would be a bug. */
        BUG_ON(priv->sram_desc.last_cb_index > 0);
 
        do {
+               u32 chunk_len;
+               u8 *start;
+               int size;
+               int nr = 0;
+
                chunk = (struct fw_chunk *)(data + offset);
                offset += sizeof(struct fw_chunk);
+               chunk_len = le32_to_cpu(chunk->length);
+               start = data + offset;
+
+               nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+               for (i = 0; i < nr; i++) {
+                       virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
+                                                        &phys[total_nr]);
+                       if (!virts[total_nr]) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+                                    CB_MAX_LENGTH);
+                       memcpy(virts[total_nr], start, size);
+                       start += size;
+                       total_nr++;
+                       /* We don't support fw chunk larger than 64*8K */
+                       BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+               }
+
                /* build DMA packet and queue up for sending */
                /* dma to chunk->address, the chunk->length bytes from data +
                 * offeset*/
                /* Dma loading */
-               rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
-                                          le32_to_cpu(chunk->address),
-                                          le32_to_cpu(chunk->length));
-               if (rc) {
+               ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+                                           nr, le32_to_cpu(chunk->address),
+                                           chunk_len);
+               if (ret) {
                        IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
                        goto out;
                }
 
-               offset += le32_to_cpu(chunk->length);
+               offset += chunk_len;
        } while (offset < len);
 
        /* Run the DMA and wait for the answer */
-       rc = ipw_fw_dma_kick(priv);
-       if (rc) {
+       ret = ipw_fw_dma_kick(priv);
+       if (ret) {
                IPW_ERROR("dmaKick Failed\n");
                goto out;
        }
 
-       rc = ipw_fw_dma_wait(priv);
-       if (rc) {
+       ret = ipw_fw_dma_wait(priv);
+       if (ret) {
                IPW_ERROR("dmaWaitSync Failed\n");
                goto out;
        }
-      out:
-       pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
-       return rc;
+ out:
+       for (i = 0; i < total_nr; i++)
+               pci_pool_free(pool, virts[i], phys[i]);
+
+       pci_pool_destroy(pool);
+
+       return ret;
 }
 
 /* stop nic */
index e3a8721..e03fe98 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -597,6 +597,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                4 * (resno - PCI_IOV_RESOURCES);
 }
 
+/**
+ * pci_sriov_resource_alignment - get resource alignment for VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ *
+ * Returns the alignment of the VF BAR found in the SR-IOV capability.
+ * This is not the same as the resource size which is defined as
+ * the VF BAR size multiplied by the number of VFs.  The alignment
+ * is just the VF BAR size.
+ */
+int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+{
+       struct resource tmp;
+       enum pci_bar_type type;
+       int reg = pci_iov_resource_bar(dev, resno, &type);
+       
+       if (!reg)
+               return 0;
+
+        __pci_read_base(dev, type, &tmp, reg);
+       return resource_alignment(&tmp);
+}
+
 /**
  * pci_restore_iov_state - restore the state of the IOV capability
  * @dev: the PCI device
index f73bcbe..5ff4d25 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
 extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                                enum pci_bar_type *type);
+extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
 
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_IOV */
 
+static inline int pci_resource_alignment(struct pci_dev *dev,
+                                        struct resource *res)
+{
+#ifdef CONFIG_PCI_IOV
+       int resno = res - dev->resource;
+
+       if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+               return pci_sriov_resource_alignment(dev, resno);
+#endif
+       return resource_alignment(res);
+}
+
 #endif /* DRIVERS_PCI_H */
index b636e24..7c443b4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,7 +25,7 @@
 #include <linux/ioport.h>
 #include <linux/cache.h>
 #include <linux/slab.h>
-
+#include "pci.h"
 
 static void pbus_assign_resources_sorted(const struct pci_bus *bus)
 {
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
                                continue;
                        r_size = resource_size(r);
                        /* For bridges size != alignment */
-                       align = resource_alignment(r);
+                       align = pci_resource_alignment(dev, r);
                        order = __ffs(align) - 20;
                        if (order > 11) {
                                dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
index 1898c7b..88cdd1a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
 
        size = resource_size(res);
        min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-       align = resource_alignment(res);
+       align = pci_resource_alignment(dev, res);
 
        /* First, try exact prefetching match.. */
        ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
        struct pci_bus *bus;
        int ret;
 
-       align = resource_alignment(res);
+       align = pci_resource_alignment(dev, res);
        if (!align) {
                dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
                        "alignment) %pR flags %#lx\n",
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
                if (!(r->flags) || r->parent)
                        continue;
 
-               r_align = resource_alignment(r);
+               r_align = pci_resource_alignment(dev, r);
                if (!r_align) {
                        dev_warn(&dev->dev, "BAR %d: bogus alignment "
                                "%pR flags %#lx\n",
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
                        struct resource_list *ln = list->next;
 
                        if (ln)
-                               align = resource_alignment(ln->res);
+                               align = pci_resource_alignment(ln->dev, ln->res);
 
                        if (r_align > align) {
                                tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
index 94502da..6d6f98f 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1485,20 +1485,15 @@ int compat_do_execve(char * filename,
        if (!bprm)
                goto out_files;
 
-       retval = -ERESTARTNOINTR;
-       if (mutex_lock_interruptible(&current->cred_guard_mutex))
+       retval = prepare_bprm_creds(bprm);
+       if (retval)
                goto out_free;
-       current->in_execve = 1;
-
-       retval = -ENOMEM;
-       bprm->cred = prepare_exec_creds();
-       if (!bprm->cred)
-               goto out_unlock;
 
        retval = check_unsafe_exec(bprm);
        if (retval < 0)
-               goto out_unlock;
+               goto out_free;
        clear_in_exec = retval;
+       current->in_execve = 1;
 
        file = open_exec(filename);
        retval = PTR_ERR(file);
@@ -1547,7 +1542,6 @@ int compat_do_execve(char * filename,
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
-       mutex_unlock(&current->cred_guard_mutex);
        acct_update_integrals(current);
        free_bprm(bprm);
        if (displaced)
@@ -1567,10 +1561,7 @@ out_file:
 out_unmark:
        if (clear_in_exec)
                current->fs->in_exec = 0;
-
-out_unlock:
        current->in_execve = 0;
-       mutex_unlock(&current->cred_guard_mutex);
 
 out_free:
        free_bprm(bprm);
index fb4f3cd..172ceb6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1015,6 +1015,35 @@ out:
 
 EXPORT_SYMBOL(flush_old_exec);
 
+/*
+ * Prepare credentials and lock ->cred_guard_mutex.
+ * install_exec_creds() commits the new creds and drops the lock.
+ * Or, if exec fails before, free_bprm() should release ->cred and
+ * and unlock.
+ */
+int prepare_bprm_creds(struct linux_binprm *bprm)
+{
+       if (mutex_lock_interruptible(&current->cred_guard_mutex))
+               return -ERESTARTNOINTR;
+
+       bprm->cred = prepare_exec_creds();
+       if (likely(bprm->cred))
+               return 0;
+
+       mutex_unlock(&current->cred_guard_mutex);
+       return -ENOMEM;
+}
+
+void free_bprm(struct linux_binprm *bprm)
+{
+       free_arg_pages(bprm);
+       if (bprm->cred) {
+               mutex_unlock(&current->cred_guard_mutex);
+               abort_creds(bprm->cred);
+       }
+       kfree(bprm);
+}
+
 /*
  * install the new credentials for this executable
  */
@@ -1024,12 +1053,13 @@ void install_exec_creds(struct linux_binprm *bprm)
 
        commit_creds(bprm->cred);
        bprm->cred = NULL;
-
-       /* cred_guard_mutex must be held at least to this point to prevent
+       /*
+        * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
-        * credentials; any time after this it may be unlocked */
-
+        * credentials; any time after this it may be unlocked.
+        */
        security_bprm_committed_creds(bprm);
+       mutex_unlock(&current->cred_guard_mutex);
 }
 EXPORT_SYMBOL(install_exec_creds);
 
@@ -1246,14 +1276,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
 
 EXPORT_SYMBOL(search_binary_handler);
 
-void free_bprm(struct linux_binprm *bprm)
-{
-       free_arg_pages(bprm);
-       if (bprm->cred)
-               abort_creds(bprm->cred);
-       kfree(bprm);
-}
-
 /*
  * sys_execve() executes a new program.
  */
@@ -1277,20 +1299,15 @@ int do_execve(char * filename,
        if (!bprm)
                goto out_files;
 
-       retval = -ERESTARTNOINTR;
-       if (mutex_lock_interruptible(&current->cred_guard_mutex))
+       retval = prepare_bprm_creds(bprm);
+       if (retval)
                goto out_free;
-       current->in_execve = 1;
-
-       retval = -ENOMEM;
-       bprm->cred = prepare_exec_creds();
-       if (!bprm->cred)
-               goto out_unlock;
 
        retval = check_unsafe_exec(bprm);
        if (retval < 0)
-               goto out_unlock;
+               goto out_free;
        clear_in_exec = retval;
+       current->in_execve = 1;
 
        file = open_exec(filename);
        retval = PTR_ERR(file);
@@ -1340,7 +1357,6 @@ int do_execve(char * filename,
        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
-       mutex_unlock(&current->cred_guard_mutex);
        acct_update_integrals(current);
        free_bprm(bprm);
        if (displaced)
@@ -1360,10 +1376,7 @@ out_file:
 out_unmark:
        if (clear_in_exec)
                current->fs->in_exec = 0;
-
-out_unlock:
        current->in_execve = 0;
-       mutex_unlock(&current->cred_guard_mutex);
 
 out_free:
        free_bprm(bprm);
index e1dedb0..78d9b92 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -362,6 +362,10 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
        if (dir_de) {
                if (old_dir != new_dir)
                        ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
+               else {
+                       kunmap(dir_page);
+                       page_cache_release(dir_page);
+               }
                inode_dec_link_count(old_dir);
        }
        return 0;
index 7e0b61b..c668bca 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -209,6 +209,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                 * We cannot call radix_tree_preload for the kernels older
                 * than 2.6.23, because it is not exported for modules.
                 */
+retry:
                err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
                if (err)
                        goto failed_unlock;
@@ -219,7 +220,6 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);
 
-retry:
                spin_lock_irq(&btnc->tree_lock);
                err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
                spin_unlock_irq(&btnc->tree_lock);
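
Relocating the retry: label in nilfs_btnode_prepare_change_key() matters because each insertion attempt needs its own radix_tree_preload() section; once the first attempt's preallocation has been consumed and its preload section ended, jumping back to the insert alone would run without that guarantee. A hedged sketch of the corrected loop shape, generic rather than the nilfs code itself, assuming kernel context, with the locking names illustrative and the eviction details trimmed:

/* Generic shape: preload, insert, end the preload section, and on
 * -EEXIST go back to the preload for the next attempt. */
retry:
        err = radix_tree_preload(GFP_NOFS);
        if (err)
                return err;

        spin_lock_irq(&tree_lock);
        err = radix_tree_insert(&tree, key, item);
        spin_unlock_irq(&tree_lock);
        radix_tree_preload_end();

        if (err == -EEXIST) {
                /* evict or drop the colliding entry, then ... */
                goto retry;             /* re-preload before the next insert */
        }
        return err;
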
index b401654..8a1e615 100644 (file)
@@ -1747,8 +1747,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
         * we know zeros will only be needed in the first and/or last cluster.
         */
        if (clusters_to_alloc || extents_to_split ||
-           wc->w_desc[0].c_needs_zero ||
-           wc->w_desc[wc->w_clen - 1].c_needs_zero)
+           (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
+                           wc->w_desc[wc->w_clen - 1].c_needs_zero)))
                cluster_of_pages = 1;
        else
                cluster_of_pages = 0;
index 2f28b7d..b4957c7 100644 (file)
@@ -85,6 +85,17 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
                goto bail;
        }
 
+       /*
+        * If the last lookup failed to create dentry lock, let us
+        * redo it.
+        */
+       if (!dentry->d_fsdata) {
+               mlog(0, "Inode %llu doesn't have dentry lock, "
+                    "returning false\n",
+                    (unsigned long long)OCFS2_I(inode)->ip_blkno);
+               goto bail;
+       }
+
        ret = 1;
 
 bail:
index 0882d16..eafcc7c 100644 (file)
@@ -619,7 +619,7 @@ xfs_file_compat_ioctl(
        case XFS_IOC_GETVERSION_32:
                cmd = _NATIVE_IOC(cmd, long);
                return xfs_file_ioctl(filp, cmd, p);
-       case XFS_IOC_SWAPEXT: {
+       case XFS_IOC_SWAPEXT_32: {
                struct xfs_swapext        sxp;
                struct compat_xfs_swapext __user *sxu = arg;
 
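
The compat case has to match XFS_IOC_SWAPEXT_32 rather than the native XFS_IOC_SWAPEXT because an ioctl command number encodes the size of its argument structure, and the 32-bit layout of the swapext argument differs from the 64-bit one, so the two commands are distinct values. A small runnable illustration of that encoding; the structures and the command number below are invented, not the real XFS definitions:

#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>        /* _IOWR() command-number encoding */

/* Hypothetical 64-bit and "compat" 32-bit layouts of the same request. */
struct demo_swapext    { uint64_t fd; uint64_t tmp_fd; uint64_t offset; };
struct demo_swapext_32 { uint32_t fd; uint32_t tmp_fd; uint64_t offset; };

#define DEMO_IOC_SWAPEXT     _IOWR('X', 109, struct demo_swapext)
#define DEMO_IOC_SWAPEXT_32  _IOWR('X', 109, struct demo_swapext_32)

int main(void)
{
        /* Same magic and number, but the encoded argument size differs,
         * so the two command values differ. */
        printf("native: %#lx\ncompat: %#lx\n",
               (unsigned long)DEMO_IOC_SWAPEXT,
               (unsigned long)DEMO_IOC_SWAPEXT_32);
        return 0;
}

Because the values differ, a 32-bit caller only reaches the compat handler if its switch uses the _32 constant, which is exactly what the one-line fix restores.
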
index 0105454..5a2bd1c 100644 (file)
@@ -137,6 +137,7 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
 
index 2ba42cd..3a748a6 100644 (file)
@@ -79,8 +79,8 @@ static inline int skcipher_enqueue_givcrypt(
 static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
        struct crypto_queue *queue)
 {
-       return container_of(ablkcipher_dequeue_request(queue),
-                           struct skcipher_givcrypt_request, creq);
+       return __crypto_dequeue_request(
+               queue, offsetof(struct skcipher_givcrypt_request, creq.base));
 }
 
 static inline void *skcipher_givcrypt_reqctx(
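
skcipher_dequeue_givcrypt() now calls __crypto_dequeue_request() with the byte offset of the embedded creq.base member, so the crypto core can hand back a pointer to the enclosing givcrypt request instead of the caller open-coding a container_of() on the dequeued base request. The pointer arithmetic involved is just offsetof() in reverse; a runnable sketch with invented structure names:

#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins: an embedded "base" request inside a larger request. */
struct base_req  { int id; };
struct outer_req { char ctx[24]; struct base_req base; };

/* Recover the enclosing object from a pointer to its embedded member,
 * given the member's byte offset -- the same arithmetic container_of()
 * performs and __crypto_dequeue_request() is handed as a parameter. */
static void *from_member(void *member, size_t offset)
{
        return (char *)member - offset;
}

int main(void)
{
        struct outer_req req = { .base = { .id = 42 } };
        struct base_req *b = &req.base;

        struct outer_req *back = from_member(b, offsetof(struct outer_req, base));
        printf("recovered id = %d (same object: %d)\n", back->base.id, back == &req);
        return 0;
}
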
index 61ee18c..2046b5b 100644 (file)
@@ -117,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
                           int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+extern int prepare_bprm_creds(struct linux_binprm *bprm);
 extern void install_exec_creds(struct linux_binprm *bprm);
 extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
 extern int set_binfmt(struct linux_binfmt *new);
index 655e772..df7607e 100644 (file)
@@ -91,6 +91,9 @@ typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);
 
+typedef void (*dm_io_hints_fn) (struct dm_target *ti,
+                               struct queue_limits *limits);
+
 /*
  * Returns:
  *    0: The target can handle the next I/O immediately.
@@ -151,6 +154,7 @@ struct target_type {
        dm_merge_fn merge;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
+       dm_io_hints_fn io_hints;
 
        /* For internal device-mapper use. */
        struct list_head list;
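
The new io_hints callback in struct target_type gives a target a chance to adjust the queue_limits that device-mapper computes for a table, for example to advertise a stripe chunk size. A hedged sketch of what an implementation might look like, assuming kernel context; the target, its private data and the direct limits assignments are illustrative and not taken from this patch:

#include <linux/module.h>
#include <linux/device-mapper.h>

struct example_target {
        unsigned int chunk_size;        /* bytes */
        unsigned int stripe_count;
};

static void example_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct example_target *et = ti->private;

        /* Advertise the chunk as the preferred minimum I/O size and a
         * full stripe as the optimal I/O size. */
        limits->io_min = et->chunk_size;
        limits->io_opt = et->chunk_size * et->stripe_count;
}

static struct target_type example_target_type = {
        .name     = "example",
        .version  = {1, 0, 0},
        .module   = THIS_MODULE,
        /* .ctr, .dtr, .map, ... omitted in this sketch; the type would be
         * registered with dm_register_target(&example_target_type). */
        .io_hints = example_io_hints,
};
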
index 642e301..8a1f972 100644 (file)
        (DM_ULOG_REQUEST_MASK & (request_type))
 
 struct dm_ulog_request {
-       char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+       /*
+        * The local unique identifier (luid) and the universally unique
+        * identifier (uuid) are used to tie a request to a specific
+        * mirror log.  A single machine log could probably make do with
+        * just the 'luid', but a cluster-aware log must use the 'uuid' and
+        * the 'luid'.  The uuid is what is required for node to node
+        * communication concerning a particular log, but the 'luid' helps
+        * differentiate between logs that are being swapped and have the
+        * same 'uuid'.  (Think "live" and "inactive" device-mapper tables.)
+        */
+       uint64_t luid;
+       char uuid[DM_UUID_LEN];
        char padding[7];        /* Padding because DM_UUID_LEN = 129 */
 
        int32_t error;          /* Used to report back processing errors */
index 13e1adf..6273fa9 100644 (file)
@@ -240,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
        return ret;
 }
 
+/*
+ * Like cancel_delayed_work(), but uses del_timer() instead of del_timer_sync().
+ * This means that if it returns 0, the timer function may still be running and
+ * the queueing may still be in progress.
+ */
+static inline int __cancel_delayed_work(struct delayed_work *work)
+{
+       int ret;
+
+       ret = del_timer(&work->timer);
+       if (ret)
+               work_clear_pending(&work->work);
+       return ret;
+}
+
 extern int cancel_delayed_work_sync(struct delayed_work *work);
 
 /* Obsolete. use cancel_delayed_work_sync() */
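
__cancel_delayed_work() only calls del_timer(), so unlike cancel_delayed_work() it never sleeps and can be used where blocking is not allowed, at the price of the weaker guarantee spelled out in the comment when it returns 0. A hedged usage sketch, assuming kernel context, with the lock, handler and work-item names invented for illustration:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_handler(struct work_struct *work)
{
        /* ... */
}
static DECLARE_DELAYED_WORK(demo_work, demo_handler);

/* Re-arm the delayed work from a context that holds a lock and
 * therefore must not sleep. */
static void demo_rearm(unsigned long delay_jiffies)
{
        spin_lock_bh(&demo_lock);
        /* Non-sleeping cancel: a return of 0 only means the handler may
         * still be running or the work may already be queued. */
        __cancel_delayed_work(&demo_work);
        schedule_delayed_work(&demo_work, delay_jiffies);
        spin_unlock_bh(&demo_lock);
}
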
index 7eafb8d..82a3191 100644 (file)
@@ -61,8 +61,8 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
-       struct tasklet_hrtimer  timer;
-       struct Qdisc            *qdisc;
+       struct hrtimer  timer;
+       struct Qdisc    *qdisc;
 };
 
 extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
index f274e19..d7cbc57 100644 (file)
@@ -50,7 +50,7 @@ static atomic_t nr_task_counters __read_mostly;
  *  1 - disallow cpu counters to unpriv
  *  2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly;
+int sysctl_perf_counter_paranoid __read_mostly = 1;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -4066,6 +4066,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
        hwc->sample_period = attr->sample_period;
        if (attr->freq && attr->sample_freq)
                hwc->sample_period = 1;
+       hwc->last_period = hwc->sample_period;
 
        atomic64_set(&hwc->period_left, hwc->sample_period);
 
index 4bde489..66e81e7 100644 (file)
@@ -1352,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file,
        }
 
        vma->vm_region = region;
+       add_nommu_region(region);
 
        /* set up the mapping */
        if (file && vma->vm_flags & VM_SHARED)
@@ -1361,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file,
        if (ret < 0)
                goto error_put_region;
 
-       add_nommu_region(region);
-
        /* okay... we have a mapping; now we have to register it */
        result = vma->vm_start;
 
index 5cc986e..a0de15f 100644 (file)
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * agressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
index 5fe3784..3311c89 100644 (file)
@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
                                     int page_idx)
 {
-       return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+       /*
+        * Any possible cpu id can be used here, so there's no need to
+        * worry about preemption or cpu hotplug.
+        */
+       return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
+                                page_idx) != NULL;
 }
 
 /* set the pointer to a chunk in a page struct */
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
                return pcpu_first_chunk;
        }
 
+       /*
+        * The address is relative to unit0 which might be unused and
+        * thus unmapped.  Offset the address to the unit space of the
+        * current processor before looking it up in the vmalloc
+        * space.  Note that any possible cpu id can be used here, so
+        * there's no need to worry about preemption or cpu hotplug.
+        */
+       addr += raw_smp_processor_id() * pcpu_unit_size;
        return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
index b9f1491..b627675 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-       if (s->flags & SLAB_DESTROY_BY_RCU)
-               rcu_barrier();
        down_write(&slub_lock);
        s->refcount--;
        if (!s->refcount) {
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
                                "still has objects.\n", s->name, __func__);
                        dump_stack();
                }
+               if (s->flags & SLAB_DESTROY_BY_RCU)
+                       rcu_barrier();
                sysfs_slab_remove(s);
        } else
                up_write(&slub_lock);
index bbb25be..7633422 100644 (file)
@@ -1025,6 +1025,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                sk->sk_prot = sk->sk_prot_creator = prot;
                sock_lock_init(sk);
                sock_net_set(sk, get_net(net));
+               atomic_set(&sk->sk_wmem_alloc, 1);
        }
 
        return sk;
@@ -1872,7 +1873,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
-       atomic_set(&sk->sk_wmem_alloc, 1);
        atomic_set(&sk->sk_drops, 0);
 }
 EXPORT_SYMBOL(sock_init_data);
index 92e6f3a..fdb694e 100644 (file)
@@ -458,7 +458,7 @@ EXPORT_SYMBOL(qdisc_warn_nonwc);
 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
-                                                timer.timer);
+                                                timer);
 
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
        __netif_schedule(qdisc_root(wd->qdisc));
@@ -468,8 +468,8 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 {
-       tasklet_hrtimer_init(&wd->timer, qdisc_watchdog,
-                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       wd->timer.function = qdisc_watchdog;
        wd->qdisc = qdisc;
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
@@ -485,13 +485,13 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
        wd->qdisc->flags |= TCQ_F_THROTTLED;
        time = ktime_set(0, 0);
        time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
-       tasklet_hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
+       hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
 }
 EXPORT_SYMBOL(qdisc_watchdog_schedule);
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
-       tasklet_hrtimer_cancel(&wd->timer);
+       hrtimer_cancel(&wd->timer);
        wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
@@ -1456,6 +1456,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
        tcm = NLMSG_DATA(nlh);
        tcm->tcm_family = AF_UNSPEC;
+       tcm->tcm__pad1 = 0;
+       tcm->tcm__pad2 = 0;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent = q->handle;
        tcm->tcm_handle = q->handle;
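
Writing tcm__pad1 and tcm__pad2 explicitly in tc_fill_tclass() matters because the tcmsg header is copied to userspace in the netlink reply, and any bytes left untouched would leak whatever the freshly allocated skb happened to contain. A hedged, equivalent alternative is to clear the whole header before filling in the fields, as in this fragment (kernel context assumed):

        tcm = NLMSG_DATA(nlh);
        memset(tcm, 0, sizeof(*tcm));   /* no field reaches userspace uninitialized */
        tcm->tcm_family  = AF_UNSPEC;
        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
        tcm->tcm_parent  = q->handle;
        tcm->tcm_handle  = q->handle;
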
index 149b040..d5798e1 100644 (file)
@@ -163,7 +163,7 @@ struct cbq_sched_data
        psched_time_t           now_rt;         /* Cached real time */
        unsigned                pmask;
 
-       struct tasklet_hrtimer  delay_timer;
+       struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
@@ -503,8 +503,6 @@ static void cbq_ovl_delay(struct cbq_class *cl)
                cl->undertime = q->now + delay;
 
                if (delay > 0) {
-                       struct hrtimer *ht;
-
                        sched += delay + cl->penalty;
                        cl->penalized = sched;
                        cl->cpriority = TC_CBQ_MAXPRIO;
@@ -512,12 +510,12 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 
                        expires = ktime_set(0, 0);
                        expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
-                       ht = &q->delay_timer.timer;
-                       if (hrtimer_try_to_cancel(ht) &&
-                           ktime_to_ns(ktime_sub(hrtimer_get_expires(ht),
-                                                 expires)) > 0)
-                               hrtimer_set_expires(ht, expires);
-                       hrtimer_restart(ht);
+                       if (hrtimer_try_to_cancel(&q->delay_timer) &&
+                           ktime_to_ns(ktime_sub(
+                                       hrtimer_get_expires(&q->delay_timer),
+                                       expires)) > 0)
+                               hrtimer_set_expires(&q->delay_timer, expires);
+                       hrtimer_restart(&q->delay_timer);
                        cl->delayed = 1;
                        cl->xstats.overactions++;
                        return;
@@ -593,7 +591,7 @@ static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
 static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 {
        struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
-                                               delay_timer.timer);
+                                               delay_timer);
        struct Qdisc *sch = q->watchdog.qdisc;
        psched_time_t now;
        psched_tdiff_t delay = 0;
@@ -623,7 +621,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 
                time = ktime_set(0, 0);
                time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
-               tasklet_hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
+               hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
        }
 
        sch->flags &= ~TCQ_F_THROTTLED;
@@ -1216,7 +1214,7 @@ cbq_reset(struct Qdisc* sch)
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
        qdisc_watchdog_cancel(&q->watchdog);
-       tasklet_hrtimer_cancel(&q->delay_timer);
+       hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
        q->now_rt = q->now;
@@ -1399,8 +1397,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->link.minidle = -0x7FFFFFFF;
 
        qdisc_watchdog_init(&q->watchdog, sch);
-       tasklet_hrtimer_init(&q->delay_timer, cbq_undelay,
-                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
index 312251d..9a8936e 100644 (file)
@@ -260,6 +260,9 @@ oxygen_search_pci_id(struct oxygen *chip, const struct pci_device_id ids[])
         * chip didn't if the first EEPROM word was overwritten.
         */
        subdevice = oxygen_read_eeprom(chip, 2);
+       /* use default ID if EEPROM is missing */
+       if (subdevice == 0xffff)
+               subdevice = 0x8788;
        /*
         * We use only the subsystem device ID for searching because it is
         * unique even without the subsystem vendor ID, which may have been
index 3b5ca70..ef2345d 100644 (file)
@@ -469,9 +469,11 @@ static int oxygen_multich_hw_params(struct snd_pcm_substream *substream,
        oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
                              oxygen_rate(hw_params) |
                              chip->model.dac_i2s_format |
+                             oxygen_i2s_mclk(hw_params) |
                              oxygen_i2s_bits(hw_params),
                              OXYGEN_I2S_RATE_MASK |
                              OXYGEN_I2S_FORMAT_MASK |
+                             OXYGEN_I2S_MCLK_MASK |
                              OXYGEN_I2S_BITS_MASK);
        oxygen_update_dac_routing(chip);
        oxygen_update_spdif_source(chip);