Merge branch 'stable-3.2' into pandora-3.2
author Grazvydas Ignotas <notasas@gmail.com>
Mon, 6 Aug 2012 22:10:39 +0000 (01:10 +0300)
committer Grazvydas Ignotas <notasas@gmail.com>
Mon, 6 Aug 2012 22:10:39 +0000 (01:10 +0300)
283 files changed:
Documentation/stable_kernel_rules.txt
Makefile
arch/arm/kernel/smp.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/include/plat/map-s3c.h
arch/arm/plat-samsung/include/plat/watchdog-reset.h
arch/mips/include/asm/thread_info.h
arch/mips/kernel/vmlinux.lds.S
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/xmon/xmon.c
arch/s390/kernel/processor.c
arch/s390/kernel/smp.c
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/processor.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/reboot.c
arch/x86/pci/fixup.c
block/blk-core.c
block/blk-exec.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/scsi_ioctl.c
drivers/acpi/ac.c
drivers/acpi/acpi_pad.c
drivers/acpi/processor_core.c
drivers/acpi/sleep.c
drivers/acpi/sysfs.c
drivers/base/power/main.c
drivers/block/umem.c
drivers/char/hw_random/atmel-rng.c
drivers/edac/i7core_edac.c
drivers/edac/sb_edac.c
drivers/gpio/gpio-wm8994.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nva3_copy.fuc
drivers/gpu/drm/nouveau/nva3_copy.fuc.h
drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hwmon/applesmc.c
drivers/hwmon/coretemp.c
drivers/hwmon/it87.c
drivers/hwspinlock/hwspinlock_core.c
drivers/input/joystick/xpad.c
drivers/input/mouse/bcm5974.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/md/dm-raid1.c
drivers/md/dm-region-hash.c
drivers/md/dm-thin.c
drivers/md/md.c
drivers/md/persistent-data/dm-space-map-checker.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-transaction-manager.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb/dvb-core/dvbdev.c
drivers/media/dvb/siano/smsusb.c
drivers/media/video/cx25821/cx25821-core.c
drivers/media/video/cx25821/cx25821.h
drivers/media/video/gspca/gspca.c
drivers/mmc/host/sdhci-pci.c
drivers/mtd/nand/cafe_nand.c
drivers/mtd/nand/nandsim.c
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/flexcan.c
drivers/net/dummy.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/macvtap.c
drivers/net/usb/ipheth.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/key.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/ipw2x00/ipw.h [new file with mode: 0644]
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/iwl-4965-sta.c
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rtl818x/rtl8187/leds.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/xen-netfront.c
drivers/oprofile/oprofile_perf.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/quirks.c
drivers/platform/x86/intel_ips.c
drivers/platform/x86/samsung-laptop.c
drivers/rtc/rtc-mxc.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/hosts.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/scsi.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/staging/iio/adc/ad7606_core.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rts_pstor/rtsx_transport.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_cdb.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_sess.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci.h
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/vhost/vhost.c
fs/btrfs/async-thread.c
fs/btrfs/disk-io.c
fs/btrfs/tree-log.c
fs/buffer.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/readdir.c
fs/ecryptfs/kthread.c
fs/ecryptfs/miscdev.c
fs/eventpoll.c
fs/exofs/ore.c
fs/exofs/ore_raid.c
fs/ext4/balloc.c
fs/ext4/bitmap.c
fs/ext4/ext4.h
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/resize.c
fs/ext4/super.c
fs/fifo.c
fs/hugetlbfs/inode.c
fs/locks.c
fs/nfs/idmap.c
fs/nfs/internal.h
fs/nfs/nfs4state.c
fs/nfs/objlayout/objio_osd.c
fs/nfs/write.c
fs/nilfs2/gcinode.c
fs/nilfs2/segment.c
fs/ocfs2/file.c
fs/open.c
fs/ramfs/file-nommu.c
fs/splice.c
fs/ubifs/sb.c
fs/udf/super.c
include/asm-generic/pgtable.h
include/linux/Kbuild
include/linux/aio.h
include/linux/blkdev.h
include/linux/cpu.h
include/linux/cpuset.h
include/linux/fs.h
include/linux/hrtimer.h
include/linux/hugetlb.h
include/linux/init_task.h
include/linux/migrate.h
include/linux/mmzone.h
include/linux/pci.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/splice.h
include/linux/timex.h
include/net/cipso_ipv4.h
include/net/sch_generic.h
include/scsi/libsas.h
include/target/target_core_base.h
kernel/cpuset.c
kernel/fork.c
kernel/hrtimer.c
kernel/power/hibernate.c
kernel/power/suspend.c
kernel/power/swap.c
kernel/relay.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_idletask.c
kernel/time/ntp.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/trace/trace.c
kernel/workqueue.c
mm/compaction.c
mm/filemap.c
mm/hugetlb.c
mm/madvise.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/page_alloc.c
mm/shmem.c
mm/slab.c
mm/slub.c
mm/vmscan.c
net/batman-adv/routing.c
net/batman-adv/translation-table.c
net/bridge/br_if.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/can/raw.c
net/core/dev.c
net/core/ethtool.c
net/core/netpoll.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/tcp_input.c
net/ipv6/route.c
net/iucv/af_iucv.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/mac80211/rx.c
net/nfc/nci/ntf.c
net/nfc/rawsock.c
net/wireless/reg.c
net/wireless/util.c
scripts/depmod.sh
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/tlv320aic3x.h
sound/soc/soc-dapm.c
tools/hv/hv_kvp_daemon.c
virt/kvm/irq_comm.c

index 21fd05c..e1f856b 100644 (file)
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
    marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
    security issue, or some "oh, that's not good" issue.  In short, something
    critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+   be considered if they fix a notable performance or interactivity issue.
+   As these fixes are not as obvious and have a higher risk of a subtle
+   regression they should only be submitted by a distribution kernel
+   maintainer and include an addendum linking to a bugzilla entry if it
+   exists and additional information on the user-visible impact.
  - New device IDs and quirks are also accepted.
  - No "theoretical race condition" issues, unless an explanation of how the
    race can be exploited is also provided.
index 7eb465e..fa5acc8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 2
-SUBLEVEL = 21
+SUBLEVEL = 26
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
index e10e59a..1d1710e 100644 (file)
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -471,9 +471,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 static void ipi_timer(void)
 {
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       irq_enter();
        evt->event_handler(evt);
-       irq_exit();
 }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -572,7 +570,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
        switch (ipinr) {
        case IPI_TIMER:
+               irq_enter();
                ipi_timer();
+               irq_exit();
                break;
 
        case IPI_RESCHEDULE:
@@ -580,15 +580,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                break;
 
        case IPI_CALL_FUNC:
+               irq_enter();
                generic_smp_call_function_interrupt();
+               irq_exit();
                break;
 
        case IPI_CALL_FUNC_SINGLE:
+               irq_enter();
                generic_smp_call_function_single_interrupt();
+               irq_exit();
                break;
 
        case IPI_CPU_STOP:
+               irq_enter();
                ipi_cpu_stop(cpu);
+               irq_exit();
                break;
 
        default:
index 33ecd0c..b1e05cc 100644 (file)
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
                return -EINVAL;
        }
 
-       if (client->is_ts && adc->ts_pend)
-               return -EAGAIN;
-
        spin_lock_irqsave(&adc->lock, flags);
 
+       if (client->is_ts && adc->ts_pend) {
+               spin_unlock_irqrestore(&adc->lock, flags);
+               return -EAGAIN;
+       }
+
        client->channel = channel;
        client->nr_samples = nr_samples;
 
index 7d04875..c0c70a8 100644 (file)
--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
+++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
@@ -22,7 +22,7 @@
 #define S3C24XX_VA_WATCHDOG    S3C_VA_WATCHDOG
 
 #define S3C2412_VA_SSMC                S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI         S3C_ADDR_CPU(0x00100000)
 
 #define S3C2410_PA_UART                (0x50000000)
 #define S3C24XX_PA_UART                S3C2410_PA_UART
index 40dbb2b..11b19ea 100644 (file)
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -24,7 +24,7 @@ static inline void arch_wdt_reset(void)
 
        __raw_writel(0, S3C2410_WTCON);   /* disable watchdog, to be safe  */
 
-       if (s3c2410_wdtclk)
+       if (!IS_ERR(s3c2410_wdtclk))
                clk_enable(s3c2410_wdtclk);
 
        /* put initial values into count and data */
index 97f8bf6..adda036 100644 (file)
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -60,6 +60,8 @@ struct thread_info {
 register struct thread_info *__current_thread_info __asm__("$28");
 #define current_thread_info()  __current_thread_info
 
+#endif /* !__ASSEMBLY__ */
+
 /* thread information allocation */
 #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
 #define THREAD_SIZE_ORDER (1)
@@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 
 #define free_thread_info(info) kfree(info)
 
-#endif /* !__ASSEMBLY__ */
-
 #define PREEMPT_ACTIVE         0x10000000
 
 /*
index a81176f..be281c6 100644 (file)
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
@@ -73,7 +74,7 @@ SECTIONS
        .data : {       /* Data */
                . = . + DATAOFFSET;             /* for CONFIG_MAPPED_KERNEL */
 
-               INIT_TASK_DATA(PAGE_SIZE)
+               INIT_TASK_DATA(THREAD_SIZE)
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
                READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
index 98b7c4b..fa3f921 100644 (file)
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
 /*
  * Convert cputime <-> microseconds
  */
-extern u64 __cputime_msec_factor;
+extern u64 __cputime_usec_factor;
 
 static inline unsigned long cputime_to_usecs(const cputime_t ct)
 {
-       return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+       return mulhdu(ct, __cputime_usec_factor);
 }
 
 static inline cputime_t usecs_to_cputime(const unsigned long us)
@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
        sec = us / 1000000;
        if (ct) {
                ct *= tb_ticks_per_sec;
-               do_div(ct, 1000);
+               do_div(ct, 1000000);
        }
        if (sec)
                ct += (cputime_t) sec * tb_ticks_per_sec;
index 559da19..578e5a0 100644 (file)
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()                ({unsigned long rval; \
-                       asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+                       asm volatile("mfmsr %0" : "=r" (rval) : \
+                                               : "memory"); rval;})
 #ifdef CONFIG_PPC_BOOK3S_64
 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
                                     : : "r" (v) : "memory")
index bf99cfa..6324008 100644 (file)
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
 
        /*
         * On PPC32 the trampoline looks like:
-        *  0x3d, 0x60, 0x00, 0x00  lis r11,sym@ha
-        *  0x39, 0x6b, 0x00, 0x00  addi r11,r11,sym@l
-        *  0x7d, 0x69, 0x03, 0xa6  mtctr r11
+        *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
+        *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
+        *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
         *  0x4e, 0x80, 0x04, 0x20  bctr
         */
 
@@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
        pr_devel(" %08x %08x ", jmp[0], jmp[1]);
 
        /* verify that this is what we expect it to be */
-       if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
-           ((jmp[1] & 0xffff0000) != 0x396b0000) ||
-           (jmp[2] != 0x7d6903a6) ||
+       if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
+           ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+           (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
                printk(KERN_ERR "Not a trampoline\n");
                return -EINVAL;
index 5db163c..ec8affe 100644 (file)
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 /*
  * Factors for converting from cputime_t (timebase ticks) to
- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
  * These are all stored as 0.64 fixed-point binary fractions.
  */
 u64 __cputime_jiffies_factor;
 EXPORT_SYMBOL(__cputime_jiffies_factor);
-u64 __cputime_msec_factor;
-EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_usec_factor;
+EXPORT_SYMBOL(__cputime_usec_factor);
 u64 __cputime_sec_factor;
 EXPORT_SYMBOL(__cputime_sec_factor);
 u64 __cputime_clockt_factor;
@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
 
        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
-       div128_by_32(1000, 0, tb_ticks_per_sec, &res);
-       __cputime_msec_factor = res.result_low;
+       div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
+       __cputime_usec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
index 44d8829..5e8dc08 100644 (file)
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -763,7 +763,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        lwz     r3,VCORE_NAPPING_THREADS(r5)
        lwz     r4,VCPU_PTID(r9)
        li      r0,1
-       sldi    r0,r0,r4
+       sld     r0,r0,r4
        andc.   r3,r3,r0                /* no sense IPI'ing ourselves */
        beq     43f
        mulli   r4,r4,PACA_SIZE         /* get paca for thread 0 */
index 03a217a..b7e63d8 100644 (file)
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -975,7 +975,7 @@ static int cpu_cmd(void)
                /* print cpus waiting or in xmon */
                printf("cpus stopped:");
                count = 0;
-               for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+               for_each_possible_cpu(cpu) {
                        if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
                                if (count == 0)
                                        printf(" %x", cpu);
index 6e0073e..07c7bf4 100644 (file)
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -26,12 +26,14 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
 void __cpuinit cpu_init(void)
 {
        struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+       struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
 
        get_cpu_id(id);
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
+       memset(idle, 0, sizeof(*idle));
 }
 
 /*
index 3ea8728..1df64a8 100644 (file)
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1020,14 +1020,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
        unsigned int cpu = (unsigned int)(long)hcpu;
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
-       struct s390_idle_data *idle;
        int err = 0;
 
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               idle = &per_cpu(s390_idle, cpu);
-               memset(idle, 0, sizeof(struct s390_idle_data));
                err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
                break;
        case CPU_DEAD:
index f3444f7..0c3b775 100644 (file)
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
 #define X86_FEATURE_XSAVEOPT   (7*32+ 4) /* Optimized Xsave */
 #define X86_FEATURE_PLN                (7*32+ 5) /* Intel Power Limit Notification */
 #define X86_FEATURE_PTS                (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS                (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM     (7*32+ 7) /* Digital Thermal Sensor */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
index effff47..cb00ccc 100644 (file)
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -31,6 +31,60 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
        ptep->pte_low = pte.pte_low;
 }
 
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. Problem is, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed, if the mmap_sem is hold for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
+ * because gcc will not read the 64bit of the pmd atomically. To fix
+ * this all places running pmd_offset_map_lock() while holding the
+ * mmap_sem in read mode, shall read the pmdp pointer using this
+ * function to know if the pmd is null nor not, and in turn to know if
+ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
+ * operations.
+ *
+ * Without THP if the mmap_sem is hold for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
+ *
+ * With THP if the mmap_sem is hold for reading, the pmd can become
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it really
+ * atomically here with a atomic64_read for the THP enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we're returning a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low part of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide if the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero in which case we return a none pmd.
+ */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       pmdval_t ret;
+       u32 *tmp = (u32 *)pmdp;
+
+       ret = (pmdval_t) (*tmp);
+       if (ret) {
+               /*
+                * If the low part is null, we must not read the high part
+                * or we can end up with a partial pmd.
+                */
+               smp_rmb();
+               ret |= ((pmdval_t)*(tmp + 1)) << 32;
+       }
+
+       return (pmd_t) { ret };
+}
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
        set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
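
Editorial note, not part of the merged patch: the comment block added above spells out the contract for pmd_read_atomic(): a walker holding mmap_sem for read should snapshot the pmd through it instead of dereferencing *pmdp directly. Below is a minimal, hypothetical caller sketch in kernel C; the helper name example_pmd_present_stable() is invented for illustration, and only pmd_read_atomic(), pmd_none() and barrier() from the kernel are assumed.

	/* Hypothetical caller sketch; not part of this commit. */
	static inline int example_pmd_present_stable(pmd_t *pmdp)
	{
		pmd_t pmdval = pmd_read_atomic(pmdp);	/* low half read first, atomically */

		barrier();	/* keep the compiler from re-reading *pmdp below */

		if (pmd_none(pmdval))
			return 0;	/* low half was zero: treat the pmd as none */

		return 1;	/* stable enough to proceed, e.g. to pte_offset_map_lock() */
	}
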
index bb3ee36..f7c89e2 100644 (file)
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -99,7 +99,6 @@ struct cpuinfo_x86 {
        u16                     apicid;
        u16                     initial_apicid;
        u16                     x86_clflush_size;
-#ifdef CONFIG_SMP
        /* number of cores as seen by the OS: */
        u16                     booted_cores;
        /* Physical processor id: */
@@ -110,7 +109,6 @@ struct cpuinfo_x86 {
        u8                      compute_unit_id;
        /* Index into per_cpu list: */
        u16                     cpu_index;
-#endif
        u32                     microcode;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
index 4558f0d..479d03c 100644 (file)
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -416,12 +416,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
                return 0;
        }
 
-       if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+       if (intsrc->source_irq == 0) {
                if (acpi_skip_timer_override) {
-                       printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+                       printk(PREFIX "BIOS IRQ0 override ignored.\n");
                        return 0;
                }
-               if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+               if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+                       && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
                        intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
                        printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
                }
@@ -1327,17 +1329,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
 {
-       /*
-        * The ati_ixp4x0_rev() early PCI quirk should have set
-        * the acpi_skip_timer_override flag already:
-        */
        if (!acpi_skip_timer_override) {
-               WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
-               pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+               pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
                        d->ident);
                acpi_skip_timer_override = 1;
        }
@@ -1431,7 +1428,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
         * is enabled.  This input is incorrectly designated the
         * ISA IRQ 0 via an interrupt source override even though
         * it is wired to the output of the master 8259A and INTIN0
-        * is not connected at all.  Force ignoring BIOS IRQ0 pin2
+        * is not connected at all.  Force ignoring BIOS IRQ0
         * override in that cases.
         */
        {
@@ -1466,6 +1463,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
                     },
         },
+       {
+        .callback = dmi_ignore_irq0_timer_override,
+        .ident = "FUJITSU SIEMENS",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+                    },
+        },
        {}
 };
 
index bae1efe..be16854 100644 (file)
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -154,16 +154,14 @@ int amd_get_subcaches(int cpu)
 {
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;
-       int cuid = 0;
+       int cuid;
 
        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;
 
        pci_read_config_dword(link, 0x1d4, &mask);
 
-#ifdef CONFIG_SMP
        cuid = cpu_data(cpu).compute_unit_id;
-#endif
        return (mask >> (4 * cuid)) & 0xf;
 }
 
@@ -172,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask)
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
-       int cuid = 0;
+       int cuid;
 
        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;
@@ -190,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask)
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }
 
-#ifdef CONFIG_SMP
        cuid = cpu_data(cpu).compute_unit_id;
-#endif
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;
 
index 3524e1f..ff8557e 100644 (file)
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -148,7 +148,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 
 static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
        /* calling is from identify_secondary_cpu() ? */
        if (!c->cpu_index)
                return;
@@ -192,7 +191,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 
 valid_k7:
        ;
-#endif
 }
 
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
index aa003b1..ca93cc7 100644 (file)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -676,9 +676,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
 
-#ifdef CONFIG_SMP
        c->cpu_index = 0;
-#endif
        filter_cpuid_features(c, false);
 
        setup_smep(c);
@@ -764,10 +762,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                c->apicid = c->initial_apicid;
 # endif
 #endif
-
-#ifdef CONFIG_X86_HT
                c->phys_proc_id = c->initial_apicid;
-#endif
        }
 
        setup_smep(c);
index 5231312..3e6ff6c 100644 (file)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -181,7 +181,6 @@ static void __cpuinit trap_init_f00f_bug(void)
 
 static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
        /* calling is from identify_secondary_cpu() ? */
        if (!c->cpu_index)
                return;
@@ -198,7 +197,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
                                    "with B stepping processors.\n");
        }
-#endif
 }
 
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
index b0f1271..3b67877 100644 (file)
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -119,9 +119,7 @@ void mce_setup(struct mce *m)
        m->time = get_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
-#ifdef CONFIG_SMP
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
-#endif
        m->apicid = cpu_data(m->extcpu).initial_apicid;
        rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
 }
index 445a61c..d4444be 100644 (file)
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -65,11 +65,9 @@ struct threshold_bank {
 };
 static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
 
-#ifdef CONFIG_SMP
 static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
 };
-#endif
 
 static DEFINE_PER_CPU(unsigned char, bank_map);        /* see which banks are on */
 
@@ -227,10 +225,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 
                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);
-#ifdef CONFIG_SMP
+
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
-#endif
 
                        memset(&b, 0, sizeof(b));
                        b.cpu                   = cpu;
index 14b2314..8022c66 100644 (file)
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
        struct cpuinfo_x86 *c = v;
-       unsigned int cpu = 0;
+       unsigned int cpu;
        int i;
 
-#ifdef CONFIG_SMP
        cpu = c->cpu_index;
-#endif
        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
index c7f64e6..ea6106c 100644 (file)
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;
 
        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
-               { X86_FEATURE_DTS,              CR_EAX, 0, 0x00000006, 0 },
+               { X86_FEATURE_DTHERM,           CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
index 563a09d..29c95d7 100644 (file)
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev,
                            const char *buf, size_t size)
 {
        unsigned long val;
-       int cpu = dev->id;
-       int ret = 0;
-       char *end;
+       int cpu;
+       ssize_t ret = 0, tmp_ret;
 
-       val = simple_strtoul(buf, &end, 0);
-       if (end == buf)
+       /* allow reload only from the BSP */
+       if (boot_cpu_data.cpu_index != dev->id)
                return -EINVAL;
 
-       if (val == 1) {
-               get_online_cpus();
-               if (cpu_online(cpu))
-                       ret = reload_for_cpu(cpu);
-               put_online_cpus();
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
+
+       if (val != 1)
+               return size;
+
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               tmp_ret = reload_for_cpu(cpu);
+               if (tmp_ret != 0)
+                       pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
+               /* save retval of the first encountered reload error */
+               if (!ret)
+                       ret = tmp_ret;
        }
+       put_online_cpus();
 
        if (!ret)
                ret = size;
index 37a458b..e61f79c 100644 (file)
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -460,6 +460,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
                },
        },
+       {       /* Handle problems with rebooting on the Precision M6600. */
+               .callback = set_pci_reboot,
+               .ident = "Dell OptiPlex 990",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+               },
+       },
        { }
 };
 
index 6dd8955..0951b81 100644 (file)
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -521,3 +521,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
        }
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
+
+/*
+ * Twinhead H12Y needs us to block out a region otherwise we map devices
+ * there and any access kills the box.
+ *
+ *   See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
+ *
+ * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
+ */
+static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev)
+{
+        if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
+                pr_info("Reserving memory on Twinhead H12Y\n");
+                request_mem_region(0xFFB00000, 0x100000, "twinhead");
+        }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
index 15de223..49d9e91 100644 (file)
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
-       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+       if (likely(!blk_queue_dead(q))) {
                kobject_get(&q->kobj);
                return 0;
        }
@@ -754,7 +754,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue;
 
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+       if (unlikely(blk_queue_dead(q)))
                return NULL;
 
        may_queue = elv_may_queue(q, rw_flags);
@@ -874,7 +874,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
-               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+               if (unlikely(blk_queue_dead(q)))
                        return NULL;
 
                prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
index a1ebceb..6053285 100644 (file)
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+       if (unlikely(blk_queue_dead(q))) {
                rq->errors = -ENXIO;
                if (rq->end_io)
                        rq->end_io(rq, rq->errors);
index e7f9f65..f0b2ca8 100644 (file)
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
-       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+       if (blk_queue_dead(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
-       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+       if (blk_queue_dead(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
index 4553245..5eed6a7 100644 (file)
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
        struct request_queue *q = td->queue;
 
        /* no throttling for dead queue */
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+       if (unlikely(blk_queue_dead(q)))
                return NULL;
 
        rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
        spin_lock_irq(q->queue_lock);
 
        /* Make sure @q is still alive */
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+       if (unlikely(blk_queue_dead(q))) {
                kfree(tg);
                return NULL;
        }
index 3f6551b..e38691d 100644 (file)
--- a/block/blk.h
+++ b/block/blk.h
@@ -85,7 +85,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
                        q->flush_queue_delayed = 1;
                        return NULL;
                }
-               if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
+               if (unlikely(blk_queue_dead(q)) ||
                    !q->elevator->ops->elevator_dispatch_fn(q, 0))
                        return NULL;
        }
index 688be8a..9e76a32 100644 (file)
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
                break;
        }
 
+       if (capable(CAP_SYS_RAWIO))
+               return 0;
+
        /* In particular, rule out all resets and host-specific ioctls.  */
        printk_ratelimited(KERN_WARNING
                           "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
 
-       return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
+       return -ENOTTY;
 }
 EXPORT_SYMBOL(scsi_verify_blk_ioctl);
 
index 6512b20..d1fcbc0 100644 (file)
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       power_supply_register(&ac->device->dev, &ac->charger);
+       result = power_supply_register(&ac->device->dev, &ac->charger);
+       if (result)
+               goto end;
 
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
               acpi_device_name(device), acpi_device_bid(device),
index a43fa1a..1502c50 100644 (file)
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
 static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
 
 static unsigned long power_saving_mwait_eax;
 
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return;
 
-       mutex_lock(&isolated_cpus_lock);
+       mutex_lock(&round_robin_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        if (cpumask_empty(tmp))
                cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
        if (cpumask_empty(tmp)) {
-               mutex_unlock(&isolated_cpus_lock);
+               mutex_unlock(&round_robin_lock);
                return;
        }
        for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        tsk_in_cpu[tsk_index] = preferred_cpu;
        cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
        cpu_weight[preferred_cpu]++;
-       mutex_unlock(&isolated_cpus_lock);
+       mutex_unlock(&round_robin_lock);
 
        set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
 }
index c850de4..eff7222 100644 (file)
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
-                * Ignores apic_id and always return 0 for CPU0's handle.
+                * Ignores apic_id and always returns 0 for the processor
+                * handle with acpi id 0 if nr_cpu_ids is 1.
+                * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
                 */
-               if (acpi_id == 0)
+               if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return apic_id;
index ca191ff..ed6bc52 100644 (file)
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -702,8 +702,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
         * can wake the system.  _S0W may be valid, too.
         */
        if (acpi_target_sleep_state == ACPI_STATE_S0 ||
-           (device_may_wakeup(dev) &&
-            adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+           (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+            adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
                acpi_status status;
 
                acpi_method[3] = 'W';
index 9f66181..240a244 100644 (file)
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
        int result = 0;
 
-       if (!strncmp(val, "enable", strlen("enable") - 1)) {
+       if (!strncmp(val, "enable", strlen("enable"))) {
                result = acpi_debug_trace(trace_method_name, trace_debug_level,
                                          trace_debug_layer, 0);
                if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
                goto exit;
        }
 
-       if (!strncmp(val, "disable", strlen("disable") - 1)) {
+       if (!strncmp(val, "disable", strlen("disable"))) {
                int name = 0;
                result = acpi_debug_trace((char *)&name, trace_debug_level,
                                          trace_debug_layer, 0);
index c3d2dfc..b96544a 100644 (file)
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -869,7 +869,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        dpm_wait_for_children(dev, async);
 
        if (async_error)
-               return 0;
+               goto Complete;
 
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -878,7 +878,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
-               return 0;
+               goto Complete;
        }
 
        device_lock(dev);
@@ -926,6 +926,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+
+ Complete:
        complete_all(&dev->power.completion);
 
        if (error) {
index aa27120..9a72277 100644 (file)
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
        }
 }
 
+struct mm_plug_cb {
+       struct blk_plug_cb cb;
+       struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+       struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+       spin_lock_irq(&mmcb->card->lock);
+       activate(mmcb->card);
+       spin_unlock_irq(&mmcb->card->lock);
+       kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+       struct blk_plug *plug = current->plug;
+       struct mm_plug_cb *mmcb;
+
+       if (!plug)
+               return 0;
+
+       list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+               if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+                       return 1;
+       }
+       /* Not currently on the callback list */
+       mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+       if (!mmcb)
+               return 0;
+
+       mmcb->card = card;
+       mmcb->cb.callback = mm_unplug;
+       list_add(&mmcb->cb.list, &plug->cb_list);
+       return 1;
+}
+
 static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
        struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
        *card->biotail = bio;
        bio->bi_next = NULL;
        card->biotail = &bio->bi_next;
+       if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+               activate(card);
        spin_unlock_irq(&card->lock);
 
        return;
index 0477982..1b5675b 100644 (file)
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,7 +34,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
        u32 *data = buf;
 
        /* data ready? */
-       if (readl(trng->base + TRNG_ODATA) & 1) {
+       if (readl(trng->base + TRNG_ISR) & 1) {
                *data = readl(trng->base + TRNG_ODATA);
                /*
                  ensure data ready is only set again AFTER the next data
index 70ad892..b3ccefa 100644 (file)
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1932,12 +1932,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
        if (mce->bank != 8)
                return NOTIFY_DONE;
 
-#ifdef CONFIG_SMP
-       /* Only handle if it is the right mc controller */
-       if (mce->socketid != pvt->i7core_dev->socket)
-               return NOTIFY_DONE;
-#endif
-
        smp_rmb();
        if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
                smp_wmb();
@@ -2234,8 +2228,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
        if (pvt->enable_scrub)
                disable_sdram_scrub_setting(mci);
 
-       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
-
        /* Disable EDAC polling */
        i7core_pci_ctl_release(pvt);
 
@@ -2336,8 +2328,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
        /* DCLK for scrub rate setting */
        pvt->dclk_freq = get_dclk_freq();
 
-       atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
-
        return 0;
 
 fail0:
@@ -2481,8 +2471,10 @@ static int __init i7core_init(void)
 
        pci_rc = pci_register_driver(&i7core_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
                return 0;
+       }
 
        i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -2498,6 +2490,7 @@ static void __exit i7core_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&i7core_driver);
+       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
 }
 
 module_init(i7core_init);
index 7a402bf..0db57b5 100644 (file)
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1609,11 +1609,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
                mce->cpuvendor, mce->cpuid, mce->time,
                mce->socketid, mce->apicid);
 
-#ifdef CONFIG_SMP
        /* Only handle if it is the right mc controller */
        if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
                return NOTIFY_DONE;
-#endif
 
        smp_rmb();
        if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
@@ -1661,9 +1659,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
        debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
                __func__, mci, &sbridge_dev->pdev[0]->dev);
 
-       atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
-                                        &sbridge_mce_dec);
-
        /* Remove MC sysfs nodes */
        edac_mc_del_mc(mci->dev);
 
@@ -1731,8 +1726,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
                goto fail0;
        }
 
-       atomic_notifier_chain_register(&x86_mce_decoder_chain,
-                                      &sbridge_mce_dec);
        return 0;
 
 fail0:
@@ -1861,8 +1854,10 @@ static int __init sbridge_init(void)
 
        pci_rc = pci_register_driver(&sbridge_driver);
 
-       if (pci_rc >= 0)
+       if (pci_rc >= 0) {
+               atomic_notifier_chain_register(&x86_mce_decoder_chain, &sbridge_mce_dec);
                return 0;
+       }
 
        sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
                      pci_rc);
@@ -1878,6 +1873,7 @@ static void __exit sbridge_exit(void)
 {
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
        pci_unregister_driver(&sbridge_driver);
+       atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &sbridge_mce_dec);
 }
 
 module_init(sbridge_init);
index 96198f3..a2da8f2 100644 (file)
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
        struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
        struct wm8994 *wm8994 = wm8994_gpio->wm8994;
 
+       if (value)
+               value = WM8994_GPN_LVL;
+
        return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
-                              WM8994_GPN_DIR, 0);
+                              WM8994_GPN_DIR | WM8994_GPN_LVL, value);
 }
 
 static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
index 3e927ce..a1ee634 100644 (file)
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -585,7 +585,7 @@ static bool
 drm_monitor_supports_rb(struct edid *edid)
 {
        if (edid->revision >= 4) {
-               bool ret;
+               bool ret = false;
                drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
                return ret;
        }
index c4da951..ca67338 100644 (file)
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1890,6 +1890,27 @@ ips_ping_for_i915_load(void)
        }
 }
 
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->dev->pdev;
+       bool primary;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = dev_priv->dev->agp->base;
+       ap->ranges[0].size =
+               dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -1927,6 +1948,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       dev_priv->mm.gtt = intel_gtt_get();
+       if (!dev_priv->mm.gtt) {
+               DRM_ERROR("Failed to initialize GTT\n");
+               ret = -ENODEV;
+               goto put_bridge;
+       }
+
+       i915_kick_out_firmware_fb(dev_priv);
+
        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
@@ -1950,13 +1980,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto put_bridge;
        }
 
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto out_rmmap;
-       }
-
        agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
        dev_priv->mm.gtt_mapping =
index 3e7c478..3e2edc6 100644 (file)
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3312,6 +3312,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 
                        if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
                                ret = -EIO;
+               } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+                                                     seqno) ||
+                                   atomic_read(&dev_priv->mm.wedged), 3000)) {
+                       ret = -EBUSY;
                }
        }
 
index d3820c2..578ddfc 100644 (file)
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -424,6 +424,30 @@ static void gen6_pm_rps_work(struct work_struct *work)
        mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+                               u32 pm_iir)
+{
+       unsigned long flags;
+
+       /*
+        * IIR bits should never already be set because IMR should
+        * prevent an interrupt from being shown in IIR. The warning
+        * displays a case where we've unsafely cleared
+        * dev_priv->pm_iir. Although missing an interrupt of the same
+        * type is not a problem, it displays a problem in the logic.
+        *
+        * The mask bit in IMR is cleared by rps_work.
+        */
+
+       spin_lock_irqsave(&dev_priv->rps_lock, flags);
+       dev_priv->pm_iir |= pm_iir;
+       I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+       POSTING_READ(GEN6_PMIMR);
+       spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+
+       queue_work(dev_priv->wq, &dev_priv->rps_work);
+}
+
 static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -529,16 +553,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
                pch_irq_handler(dev, pch_iir);
        }
 
-       if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-               unsigned long flags;
-               spin_lock_irqsave(&dev_priv->rps_lock, flags);
-               WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-               dev_priv->pm_iir |= pm_iir;
-               I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-               POSTING_READ(GEN6_PMIMR);
-               spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
-               queue_work(dev_priv->wq, &dev_priv->rps_work);
-       }
+       if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               gen6_queue_rps_work(dev_priv, pm_iir);
 
        /* should clear PCH hotplug event before clear CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
@@ -634,25 +650,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
                i915_handle_rps_change(dev);
        }
 
-       if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-               /*
-                * IIR bits should never already be set because IMR should
-                * prevent an interrupt from being shown in IIR. The warning
-                * displays a case where we've unsafely cleared
-                * dev_priv->pm_iir. Although missing an interrupt of the same
-                * type is not a problem, it displays a problem in the logic.
-                *
-                * The mask bit in IMR is cleared by rps_work.
-                */
-               unsigned long flags;
-               spin_lock_irqsave(&dev_priv->rps_lock, flags);
-               WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-               dev_priv->pm_iir |= pm_iir;
-               I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-               POSTING_READ(GEN6_PMIMR);
-               spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
-               queue_work(dev_priv->wq, &dev_priv->rps_work);
-       }
+       if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+               gen6_queue_rps_work(dev_priv, pm_iir);
 
        /* should clear PCH hotplug event before clear CPU irq */
        I915_WRITE(SDEIIR, pch_iir);
index a1eb83d..f38d196 100644 (file)
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -739,8 +739,11 @@ static void i915_restore_display(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
                I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
-               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+               /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+                * otherwise we get blank eDP screen after S3 on some machines
+                */
                I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
                I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
index 5c1cdb8..cc75c4b 100644 (file)
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2186,6 +2186,33 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return 0;
 }
 
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+       struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       bool was_interruptible = dev_priv->mm.interruptible;
+       int ret;
+
+       wait_event(dev_priv->pending_flip_queue,
+                  atomic_read(&dev_priv->mm.wedged) ||
+                  atomic_read(&obj->pending_flip) == 0);
+
+       /* Big Hammer, we also need to ensure that any pending
+        * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+        * current scanout is retired before unpinning the old
+        * framebuffer.
+        *
+        * This should only fail upon a hung GPU, in which case we
+        * can safely continue.
+        */
+       dev_priv->mm.interruptible = false;
+       ret = i915_gem_object_finish_gpu(obj);
+       dev_priv->mm.interruptible = was_interruptible;
+
+       return ret;
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *old_fb)
@@ -2224,25 +2251,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       if (old_fb) {
-               struct drm_i915_private *dev_priv = dev->dev_private;
-               struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-
-               wait_event(dev_priv->pending_flip_queue,
-                          atomic_read(&dev_priv->mm.wedged) ||
-                          atomic_read(&obj->pending_flip) == 0);
-
-               /* Big Hammer, we also need to ensure that any pending
-                * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-                * current scanout is retired before unpinning the old
-                * framebuffer.
-                *
-                * This should only fail upon a hung GPU, in which case we
-                * can safely continue.
-                */
-               ret = i915_gem_object_finish_gpu(obj);
-               (void) ret;
-       }
+       if (old_fb)
+               intel_finish_fb(old_fb);
 
        ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
                                         LEAVE_ATOMIC_MODE_SET);
@@ -3312,6 +3322,23 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        struct drm_device *dev = crtc->dev;
 
+       /* Flush any pending WAITs before we disable the pipe. Note that
+        * we need to drop the struct_mutex in order to acquire it again
+        * during the lowlevel dpms routines around a couple of the
+        * operations. It does not look trivial nor desirable to move
+        * that locking higher. So instead we leave a window for the
+        * submission of further commands on the fb before we can actually
+        * disable it. This race with userspace exists anyway, and we can
+        * only rely on the pipe being disabled by userspace after it
+        * receives the hotplug notification and has flushed any pending
+        * batches.
+        */
+       if (crtc->fb) {
+               mutex_lock(&dev->struct_mutex);
+               intel_finish_fb(crtc->fb);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
        if (crtc->fb) {
@@ -8016,8 +8043,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
        if (intel_enable_rc6(dev_priv->dev))
-               rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
-                       GEN6_RC_CTL_RC6_ENABLE;
+               rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
+                       ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
 
        I915_WRITE(GEN6_RC_CONTROL,
                   rc6_mask |
index 933e66b..f6613dc 100644 (file)
@@ -306,7 +306,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
-                       | RING_REPORT_64K | RING_VALID);
+                       | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
        if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
@@ -1157,18 +1157,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
-       u32 head;
-
-       /* If the reported head position has wrapped or hasn't advanced,
-        * fallback to the slow and accurate path.
-        */
-       head = intel_read_status_page(ring, 4);
-       if (head > ring->head) {
-               ring->head = head;
-               ring->space = ring_space(ring);
-               if (ring->space >= n)
-                       return 0;
-       }
 
        trace_i915_ring_wait_begin(ring);
        end = jiffies + 3 * HZ;
index 3a4cc32..cc0801d 100644 (file)
@@ -499,7 +499,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
        nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
 
        ret = drm_fb_helper_init(dev, &nfbdev->helper,
-                                nv_two_heads(dev) ? 2 : 1, 4);
+                                dev->mode_config.num_crtc, 4);
        if (ret) {
                kfree(nfbdev);
                return ret;
index eaf35f8..d894731 100644 (file)
@@ -118,9 +118,9 @@ dispatch_dma:
 // mthd 0x030c-0x0340, various stuff
 .b16 0xc3 14
 .b32 ctx_src_address_high           ~0x000000ff
-.b32 ctx_src_address_low            ~0xfffffff0
+.b32 ctx_src_address_low            ~0xffffffff
 .b32 ctx_dst_address_high           ~0x000000ff
-.b32 ctx_dst_address_low            ~0xfffffff0
+.b32 ctx_dst_address_low            ~0xffffffff
 .b32 ctx_src_pitch                  ~0x0007ffff
 .b32 ctx_dst_pitch                  ~0x0007ffff
 .b32 ctx_xcnt                       ~0x0000ffff
index 2731de2..e2a0e88 100644 (file)
@@ -1,37 +1,72 @@
-uint32_t nva3_pcopy_data[] = {
+u32 nva3_pcopy_data[] = {
+/* 0x0000: ctx_object */
        0x00000000,
+/* 0x0004: ctx_dma */
+/* 0x0004: ctx_dma_query */
        0x00000000,
+/* 0x0008: ctx_dma_src */
        0x00000000,
+/* 0x000c: ctx_dma_dst */
        0x00000000,
+/* 0x0010: ctx_query_address_high */
        0x00000000,
+/* 0x0014: ctx_query_address_low */
        0x00000000,
+/* 0x0018: ctx_query_counter */
        0x00000000,
+/* 0x001c: ctx_src_address_high */
        0x00000000,
+/* 0x0020: ctx_src_address_low */
        0x00000000,
+/* 0x0024: ctx_src_pitch */
        0x00000000,
+/* 0x0028: ctx_src_tile_mode */
        0x00000000,
+/* 0x002c: ctx_src_xsize */
        0x00000000,
+/* 0x0030: ctx_src_ysize */
        0x00000000,
+/* 0x0034: ctx_src_zsize */
        0x00000000,
+/* 0x0038: ctx_src_zoff */
        0x00000000,
+/* 0x003c: ctx_src_xoff */
        0x00000000,
+/* 0x0040: ctx_src_yoff */
        0x00000000,
+/* 0x0044: ctx_src_cpp */
        0x00000000,
+/* 0x0048: ctx_dst_address_high */
        0x00000000,
+/* 0x004c: ctx_dst_address_low */
        0x00000000,
+/* 0x0050: ctx_dst_pitch */
        0x00000000,
+/* 0x0054: ctx_dst_tile_mode */
        0x00000000,
+/* 0x0058: ctx_dst_xsize */
        0x00000000,
+/* 0x005c: ctx_dst_ysize */
        0x00000000,
+/* 0x0060: ctx_dst_zsize */
        0x00000000,
+/* 0x0064: ctx_dst_zoff */
        0x00000000,
+/* 0x0068: ctx_dst_xoff */
        0x00000000,
+/* 0x006c: ctx_dst_yoff */
        0x00000000,
+/* 0x0070: ctx_dst_cpp */
        0x00000000,
+/* 0x0074: ctx_format */
        0x00000000,
+/* 0x0078: ctx_swz_const0 */
        0x00000000,
+/* 0x007c: ctx_swz_const1 */
        0x00000000,
+/* 0x0080: ctx_xcnt */
        0x00000000,
+/* 0x0084: ctx_ycnt */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0100: dispatch_table */
        0x00010000,
        0x00000000,
        0x00000000,
@@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
        0x00010162,
        0x00000000,
        0x00030060,
+/* 0x0128: dispatch_dma */
        0x00010170,
        0x00000000,
        0x00010170,
@@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
        0x0000001c,
        0xffffff00,
        0x00000020,
-       0x0000000f,
+       0x00000000,
        0x00000048,
        0xffffff00,
        0x0000004c,
-       0x0000000f,
+       0x00000000,
        0x00000024,
        0xfff80000,
        0x00000050,
@@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
        0x00000800,
 };
 
-uint32_t nva3_pcopy_code[] = {
+u32 nva3_pcopy_code[] = {
+/* 0x0000: main */
        0x04fe04bd,
        0x3517f000,
        0xf10010fe,
@@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
        0x17f11031,
        0x27f01200,
        0x0012d003,
+/* 0x002f: spin */
        0xf40031f4,
        0x0ef40028,
+/* 0x0035: ih */
        0x8001cffd,
        0xf40812c4,
        0x21f4060b,
+/* 0x0041: ih_no_chsw */
        0x0412c472,
        0xf4060bf4,
+/* 0x004a: ih_no_cmd */
        0x11c4c321,
        0x4001d00c,
+/* 0x0052: swctx */
        0x47f101f8,
        0x4bfe7700,
        0x0007fe00,
        0xf00204b9,
        0x01f40643,
        0x0604fa09,
+/* 0x006b: swctx_load */
        0xfa060ef4,
+/* 0x006e: swctx_done */
        0x03f80504,
+/* 0x0072: chsw */
        0x27f100f8,
        0x23cf1400,
        0x1e3fc800,
@@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
        0x1e3af052,
        0xf00023d0,
        0x24d00147,
+/* 0x0093: chsw_no_unload */
        0xcf00f880,
        0x3dc84023,
        0x220bf41e,
        0xf40131f4,
        0x57f05221,
        0x0367f004,
+/* 0x00a8: chsw_load_ctx_dma */
        0xa07856bc,
        0xb6018068,
        0x87d00884,
        0x0162b600,
+/* 0x00bb: chsw_finish_load */
        0xf0f018f4,
        0x23d00237,
+/* 0x00c3: dispatch */
        0xf100f880,
        0xcf190037,
        0x33cf4032,
@@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
        0x1024b607,
        0x010057f1,
        0x74bd64bd,
+/* 0x00dc: dispatch_loop */
        0x58005658,
        0x50b60157,
        0x0446b804,
@@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
        0xb60276bb,
        0x57bb0374,
        0xdf0ef400,
+/* 0x0100: dispatch_valid_mthd */
        0xb60246bb,
        0x45bb0344,
        0x01459800,
@@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
        0xb0014658,
        0x1bf40064,
        0x00538009,
+/* 0x0127: dispatch_cmd */
        0xf4300ef4,
        0x55f90132,
        0xf40c01f4,
+/* 0x0132: dispatch_invalid_bitfield */
        0x25f0250e,
+/* 0x0135: dispatch_illegal_mthd */
        0x0125f002,
+/* 0x0138: dispatch_error */
        0x100047f1,
        0xd00042d0,
        0x27f04043,
        0x0002d040,
+/* 0x0148: hostirq_wait */
        0xf08002cf,
        0x24b04024,
        0xf71bf400,
+/* 0x0154: dispatch_done */
        0x1d0027f1,
        0xd00137f0,
        0x00f80023,
+/* 0x0160: cmd_nop */
+/* 0x0162: cmd_pm_trigger */
        0x27f100f8,
        0x34bd2200,
        0xd00233f0,
        0x00f80023,
+/* 0x0170: cmd_dma */
        0x012842b7,
        0xf00145b6,
        0x43801e39,
        0x0040b701,
        0x0644b606,
        0xf80043d0,
+/* 0x0189: cmd_exec_set_format */
        0xf030f400,
        0xb00001b0,
        0x01b00101,
@@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
        0x70b63847,
        0x0232f401,
        0x94bd84bd,
+/* 0x01b4: ncomp_loop */
        0xb60f4ac4,
        0xb4bd0445,
+/* 0x01bc: bpc_loop */
        0xf404a430,
        0xa5ff0f18,
        0x00cbbbc0,
        0xf40231f4,
+/* 0x01ce: cmp_c0 */
        0x1bf4220e,
        0x10c7f00c,
        0xf400cbbb,
+/* 0x01da: cmp_c1 */
        0xa430160e,
        0x0c18f406,
        0xbb14c7f0,
        0x0ef400cb,
+/* 0x01e9: cmp_zero */
        0x80c7f107,
+/* 0x01ed: bpc_next */
        0x01c83800,
        0xb60180b6,
        0xb5b801b0,
@@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
        0x98110680,
        0x68fd2008,
        0x0502f400,
+/* 0x0216: dst_xcnt */
        0x75fd64bd,
        0x1c078000,
        0xf10078fd,
@@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
        0x980056d0,
        0x56d01f06,
        0x1030f440,
+/* 0x0276: cmd_exec_set_surface_tiled */
        0x579800f8,
        0x6879c70a,
        0xb66478c7,
@@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
        0x0e76b060,
        0xf0091bf4,
        0x0ef40477,
+/* 0x0291: xtile64 */
        0x027cf00f,
        0xfd1170b6,
        0x77f00947,
+/* 0x029d: xtileok */
        0x0f5a9806,
        0xfd115b98,
        0xb7f000ab,
@@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
        0x67d00600,
        0x0060b700,
        0x0068d004,
+/* 0x0382: cmd_exec_set_surface_linear */
        0x6cf000f8,
        0x0260b702,
        0x0864b602,
@@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
        0xb70067d0,
        0x98040060,
        0x67d00957,
+/* 0x03ab: cmd_exec_wait */
        0xf900f800,
        0xf110f900,
        0xb6080007,
+/* 0x03b6: loop */
        0x01cf0604,
        0x0114f000,
        0xfcfa1bf4,
        0xf800fc10,
+/* 0x03c5: cmd_exec_query */
        0x0d34c800,
        0xf5701bf4,
        0xf103ab21,
@@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
        0x47f10153,
        0x44b60800,
        0x0045d006,
+/* 0x0438: query_counter */
        0x03ab21f5,
        0x080c47f1,
        0x980644b6,
@@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
        0x47f10153,
        0x44b60800,
        0x0045d006,
+/* 0x0492: cmd_exec */
        0x21f500f8,
        0x3fc803ab,
        0x0e0bf400,
        0x018921f5,
        0x020047f1,
+/* 0x04a7: cmd_exec_no_format */
        0xf11e0ef4,
        0xb6081067,
        0x77f00664,
@@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
        0x981c0780,
        0x67d02007,
        0x4067d000,
+/* 0x04c2: cmd_exec_init_src_surface */
        0x32f444bd,
        0xc854bd02,
        0x0bf4043f,
        0x8221f50a,
        0x0a0ef403,
+/* 0x04d4: src_tiled */
        0x027621f5,
+/* 0x04db: cmd_exec_init_dst_surface */
        0xf40749f0,
        0x57f00231,
        0x083fc82c,
        0xf50a0bf4,
        0xf4038221,
+/* 0x04ee: dst_tiled */
        0x21f50a0e,
        0x49f00276,
+/* 0x04f5: cmd_exec_kick */
        0x0057f108,
        0x0654b608,
        0xd0210698,
@@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
        0xc80054d0,
        0x0bf40c3f,
        0xc521f507,
+/* 0x0519: cmd_exec_done */
+/* 0x051b: cmd_wrcache_flush */
        0xf100f803,
        0xbd220027,
        0x0133f034,
index 4199038..9e87036 100644 (file)
@@ -1,34 +1,65 @@
-uint32_t nvc0_pcopy_data[] = {
+u32 nvc0_pcopy_data[] = {
+/* 0x0000: ctx_object */
        0x00000000,
+/* 0x0004: ctx_query_address_high */
        0x00000000,
+/* 0x0008: ctx_query_address_low */
        0x00000000,
+/* 0x000c: ctx_query_counter */
        0x00000000,
+/* 0x0010: ctx_src_address_high */
        0x00000000,
+/* 0x0014: ctx_src_address_low */
        0x00000000,
+/* 0x0018: ctx_src_pitch */
        0x00000000,
+/* 0x001c: ctx_src_tile_mode */
        0x00000000,
+/* 0x0020: ctx_src_xsize */
        0x00000000,
+/* 0x0024: ctx_src_ysize */
        0x00000000,
+/* 0x0028: ctx_src_zsize */
        0x00000000,
+/* 0x002c: ctx_src_zoff */
        0x00000000,
+/* 0x0030: ctx_src_xoff */
        0x00000000,
+/* 0x0034: ctx_src_yoff */
        0x00000000,
+/* 0x0038: ctx_src_cpp */
        0x00000000,
+/* 0x003c: ctx_dst_address_high */
        0x00000000,
+/* 0x0040: ctx_dst_address_low */
        0x00000000,
+/* 0x0044: ctx_dst_pitch */
        0x00000000,
+/* 0x0048: ctx_dst_tile_mode */
        0x00000000,
+/* 0x004c: ctx_dst_xsize */
        0x00000000,
+/* 0x0050: ctx_dst_ysize */
        0x00000000,
+/* 0x0054: ctx_dst_zsize */
        0x00000000,
+/* 0x0058: ctx_dst_zoff */
        0x00000000,
+/* 0x005c: ctx_dst_xoff */
        0x00000000,
+/* 0x0060: ctx_dst_yoff */
        0x00000000,
+/* 0x0064: ctx_dst_cpp */
        0x00000000,
+/* 0x0068: ctx_format */
        0x00000000,
+/* 0x006c: ctx_swz_const0 */
        0x00000000,
+/* 0x0070: ctx_swz_const1 */
        0x00000000,
+/* 0x0074: ctx_xcnt */
        0x00000000,
+/* 0x0078: ctx_ycnt */
        0x00000000,
        0x00000000,
        0x00000000,
@@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+/* 0x0100: dispatch_table */
        0x00010000,
        0x00000000,
        0x00000000,
@@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
        0x00000010,
        0xffffff00,
        0x00000014,
-       0x0000000f,
+       0x00000000,
        0x0000003c,
        0xffffff00,
        0x00000040,
-       0x0000000f,
+       0x00000000,
        0x00000018,
        0xfff80000,
        0x00000044,
@@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
        0x00000800,
 };
 
-uint32_t nvc0_pcopy_code[] = {
+u32 nvc0_pcopy_code[] = {
+/* 0x0000: main */
        0x04fe04bd,
        0x3517f000,
        0xf10010fe,
@@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
        0x17f11031,
        0x27f01200,
        0x0012d003,
+/* 0x002f: spin */
        0xf40031f4,
        0x0ef40028,
+/* 0x0035: ih */
        0x8001cffd,
        0xf40812c4,
        0x21f4060b,
+/* 0x0041: ih_no_chsw */
        0x0412c4ca,
        0xf5070bf4,
+/* 0x004b: ih_no_cmd */
        0xc4010221,
        0x01d00c11,
+/* 0x0053: swctx */
        0xf101f840,
        0xfe770047,
        0x47f1004b,
@@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
        0xf00204b9,
        0x01f40643,
        0x0604fa09,
+/* 0x00c3: swctx_load */
        0xfa060ef4,
+/* 0x00c6: swctx_done */
        0x03f80504,
+/* 0x00ca: chsw */
        0x27f100f8,
        0x23cf1400,
        0x1e3fc800,
@@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
        0x1e3af053,
        0xf00023d0,
        0x24d00147,
+/* 0x00eb: chsw_no_unload */
        0xcf00f880,
        0x3dc84023,
        0x090bf41e,
        0xf40131f4,
+/* 0x00fa: chsw_finish_load */
        0x37f05321,
        0x8023d002,
+/* 0x0102: dispatch */
        0x37f100f8,
        0x32cf1900,
        0x0033cf40,
        0x07ff24e4,
        0xf11024b6,
        0xbd010057,
+/* 0x011b: dispatch_loop */
        0x5874bd64,
        0x57580056,
        0x0450b601,
@@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
        0xbb0f08f4,
        0x74b60276,
        0x0057bb03,
+/* 0x013f: dispatch_valid_mthd */
        0xbbdf0ef4,
        0x44b60246,
        0x0045bb03,
@@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
        0x64b00146,
        0x091bf400,
        0xf4005380,
+/* 0x0166: dispatch_cmd */
        0x32f4300e,
        0xf455f901,
        0x0ef40c01,
+/* 0x0171: dispatch_invalid_bitfield */
        0x0225f025,
+/* 0x0174: dispatch_illegal_mthd */
+/* 0x0177: dispatch_error */
        0xf10125f0,
        0xd0100047,
        0x43d00042,
        0x4027f040,
+/* 0x0187: hostirq_wait */
        0xcf0002d0,
        0x24f08002,
        0x0024b040,
+/* 0x0193: dispatch_done */
        0xf1f71bf4,
        0xf01d0027,
        0x23d00137,
+/* 0x019f: cmd_nop */
        0xf800f800,
+/* 0x01a1: cmd_pm_trigger */
        0x0027f100,
        0xf034bd22,
        0x23d00233,
+/* 0x01af: cmd_exec_set_format */
        0xf400f800,
        0x01b0f030,
        0x0101b000,
@@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
        0x3847c701,
        0xf40170b6,
        0x84bd0232,
+/* 0x01da: ncomp_loop */
        0x4ac494bd,
        0x0445b60f,
+/* 0x01e2: bpc_loop */
        0xa430b4bd,
        0x0f18f404,
        0xbbc0a5ff,
        0x31f400cb,
        0x220ef402,
+/* 0x01f4: cmp_c0 */
        0xf00c1bf4,
        0xcbbb10c7,
        0x160ef400,
+/* 0x0200: cmp_c1 */
        0xf406a430,
        0xc7f00c18,
        0x00cbbb14,
+/* 0x020f: cmp_zero */
        0xf1070ef4,
+/* 0x0213: bpc_next */
        0x380080c7,
        0x80b601c8,
        0x01b0b601,
@@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
        0x1d08980e,
        0xf40068fd,
        0x64bd0502,
+/* 0x023c: dst_xcnt */
        0x800075fd,
        0x78fd1907,
        0x1057f100,
@@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
        0x1c069800,
        0xf44056d0,
        0x00f81030,
+/* 0x029c: cmd_exec_set_surface_tiled */
        0xc7075798,
        0x78c76879,
        0x0380b664,
        0xb06077c7,
        0x1bf40e76,
        0x0477f009,
+/* 0x02b7: xtile64 */
        0xf00f0ef4,
        0x70b6027c,
        0x0947fd11,
+/* 0x02c3: xtileok */
        0x980677f0,
        0x5b980c5a,
        0x00abfd0e,
@@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
        0xb70067d0,
        0xd0040060,
        0x00f80068,
+/* 0x03a8: cmd_exec_set_surface_linear */
        0xb7026cf0,
        0xb6020260,
        0x57980864,
@@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
        0x0060b700,
        0x06579804,
        0xf80067d0,
+/* 0x03d1: cmd_exec_wait */
        0xf900f900,
        0x0007f110,
        0x0604b608,
+/* 0x03dc: loop */
        0xf00001cf,
        0x1bf40114,
        0xfc10fcfa,
+/* 0x03eb: cmd_exec_query */
        0xc800f800,
        0x1bf40d34,
        0xd121f570,
@@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
        0x0153f026,
        0x080047f1,
        0xd00644b6,
+/* 0x045e: query_counter */
        0x21f50045,
        0x47f103d1,
        0x44b6080c,
@@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
        0x080047f1,
        0xd00644b6,
        0x00f80045,
+/* 0x04b8: cmd_exec */
        0x03d121f5,
        0xf4003fc8,
        0x21f50e0b,
        0x47f101af,
        0x0ef40200,
+/* 0x04cd: cmd_exec_no_format */
        0x1067f11e,
        0x0664b608,
        0x800177f0,
@@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
        0x1d079819,
        0xd00067d0,
        0x44bd4067,
+/* 0x04e8: cmd_exec_init_src_surface */
        0xbd0232f4,
        0x043fc854,
        0xf50a0bf4,
        0xf403a821,
+/* 0x04fa: src_tiled */
        0x21f50a0e,
        0x49f0029c,
+/* 0x0501: cmd_exec_init_dst_surface */
        0x0231f407,
        0xc82c57f0,
        0x0bf4083f,
        0xa821f50a,
        0x0a0ef403,
+/* 0x0514: dst_tiled */
        0x029c21f5,
+/* 0x051b: cmd_exec_kick */
        0xf10849f0,
        0xb6080057,
        0x06980654,
@@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
        0x54d00546,
        0x0c3fc800,
        0xf5070bf4,
+/* 0x053f: cmd_exec_done */
        0xf803eb21,
+/* 0x0541: cmd_wrcache_flush */
        0x0027f100,
        0xf034bd22,
        0x23d00133,
index 552b436..3254d51 100644 (file)
@@ -22,6 +22,7 @@
  *
  * Authors: Dave Airlie
  *          Alex Deucher
+ *          Jerome Glisse
  */
 #include "drmP.h"
 #include "radeon_drm.h"
@@ -634,7 +635,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
        ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
                                        link_status, DP_LINK_STATUS_SIZE, 100);
        if (ret <= 0) {
-               DRM_ERROR("displayport link status failed\n");
                return false;
        }
 
@@ -812,8 +812,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
                else
                        mdelay(dp_info->rd_interval * 4);
 
-               if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+               if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+                       DRM_ERROR("displayport link status failed\n");
                        break;
+               }
 
                if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
                        clock_recovery = true;
@@ -875,8 +877,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
                else
                        mdelay(dp_info->rd_interval * 4);
 
-               if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+               if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+                       DRM_ERROR("displayport link status failed\n");
                        break;
+               }
 
                if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
                        channel_eq = true;
index 4a4493f..87d494d 100644 (file)
@@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 
        /* just deal with DP (not eDP) here. */
        if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
-               int saved_dpms = connector->dpms;
-
-               /* Only turn off the display it it's physically disconnected */
-               if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
-                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-               else if (radeon_dp_needs_link_train(radeon_connector))
-                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-               connector->dpms = saved_dpms;
+               struct radeon_connector_atom_dig *dig_connector =
+                       radeon_connector->con_priv;
+
+               /* if existing sink type was not DP, no need to retrain */
+               if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+                       return;
+
+               /* first get sink type as it may be reset after (un)plug */
+               dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+               /* don't do anything if sink is not display port, i.e.,
+                * passive dp->(dvi|hdmi) adaptor
+                */
+               if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+                       int saved_dpms = connector->dpms;
+                       /* Only turn off the display if it's physically disconnected */
+                       if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+                               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+                       } else if (radeon_dp_needs_link_train(radeon_connector)) {
+                               /* set it to OFF so that drm_helper_connector_dpms()
+                                * won't return immediately since the current state
+                                * is ON at this point.
+                                */
+                               connector->dpms = DRM_MODE_DPMS_OFF;
+                               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+                       }
+                       connector->dpms = saved_dpms;
+               }
        }
 }
 
index 986d608..2132109 100644 (file)
@@ -257,8 +257,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                                if (!(cursor_end & 0x7f))
                                        w--;
                        }
-                       if (w <= 0)
+                       if (w <= 0) {
                                w = 1;
+                               cursor_end = x - xorigin + w;
+                               if (!(cursor_end & 0x7f)) {
+                                       x--;
+                                       WARN_ON_ONCE(x < 0);
+                               }
+                       }
                }
        }
 
index f3ae607..39497c7 100644 (file)
@@ -117,7 +117,6 @@ int radeon_bo_create(struct radeon_device *rdev,
                return -ENOMEM;
        }
 
-retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -130,6 +129,8 @@ retry:
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
+
+retry:
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
index 299d238..899c712 100644 (file)
@@ -514,6 +514,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index c27b402..95430a0 100644 (file)
@@ -1374,6 +1374,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1884,6 +1887,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -1968,6 +1972,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index fba3fc4..7db934d 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI   0x024c
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO    0x024d
 #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS    0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI  0x0239
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO   0x023a
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS   0x023b
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL        0x0007
 
+#define USB_VENDOR_ID_MADCATZ          0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD  0x4540
+
 #define USB_VENDOR_ID_MCC              0x09db
 #define USB_DEVICE_ID_MCC_PMD1024LS    0x0076
 #define USB_DEVICE_ID_MCC_PMD1208LS    0x007a
index 4c07436..d99aa84 100644 (file)
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
        int i;
 
        if (send_command(cmd) || send_argument(key)) {
-               pr_warn("%s: read arg fail\n", key);
+               pr_warn("%.4s: read arg fail\n", key);
                return -EIO;
        }
 
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 
        for (i = 0; i < len; i++) {
                if (__wait_status(0x05)) {
-                       pr_warn("%s: read data fail\n", key);
+                       pr_warn("%.4s: read data fail\n", key);
                        return -EIO;
                }
                buffer[i] = inb(APPLESMC_DATA_PORT);
index 427468f..19b4412 100644 (file)
@@ -57,16 +57,15 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TOTAL_ATTRS            (MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA          (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
-#ifdef CONFIG_SMP
 #define TO_PHYS_ID(cpu)                cpu_data(cpu).phys_proc_id
 #define TO_CORE_ID(cpu)                cpu_data(cpu).cpu_core_id
+#define TO_ATTR_NO(cpu)                (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+#ifdef CONFIG_SMP
 #define for_each_sibling(i, cpu)       for_each_cpu(i, cpu_sibling_mask(cpu))
 #else
-#define TO_PHYS_ID(cpu)                (cpu)
-#define TO_CORE_ID(cpu)                (cpu)
 #define for_each_sibling(i, cpu)       for (i = 0; false; )
 #endif
-#define TO_ATTR_NO(cpu)                (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
 
 /*
  * Per-Core Temperature Data
@@ -660,7 +659,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
         * sensors. We check this bit only, all the early CPUs
         * without thermal sensors will be filtered out.
         */
-       if (!cpu_has(c, X86_FEATURE_DTS))
+       if (!cpu_has(c, X86_FEATURE_DTHERM))
                return;
 
        if (!pdev) {
index d912649..1ba7af2 100644 (file)
@@ -2086,7 +2086,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
 
        /* Start monitoring */
        it87_write_value(data, IT87_REG_CONFIG,
-                        (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+                        (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
                         | (update_vbat ? 0x41 : 0x01));
 }
 
index 61c9cf1..1201a15 100644 (file)
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;
 
-               ret = hwspin_lock_register_single(hwlock, i);
+               ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
 
 reg_failed:
        while (--i >= 0)
-               hwspin_lock_unregister_single(i);
+               hwspin_lock_unregister_single(base_id + i);
        return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_register);
index d728875..2189cbf 100644 (file)
@@ -142,6 +142,7 @@ static const struct xpad_device {
        { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
        { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
        { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+       { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
        { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
        { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
        { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+       { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
        { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
 };
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
+       { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harminix Rock Band Guitar and Drums */
-       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x0f0d),            /* Hori Controllers */
+       XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
        { }
 };
 
index 5ec617e..ec58f48 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI  0x0252
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO   0x0253
 #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS   0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI   0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO    0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS    0x0264
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+       /* MacbookPro10,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
        /* Terminating entry */
        {}
 };
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
                { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0x84, sizeof(struct bt_data),
+               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+               { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+               { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+               { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+       },
        {}
 };
 
index f1d5408..0f074e0 100644 (file)
@@ -59,6 +59,8 @@ static struct protection_domain *pt_domain;
 
 static struct iommu_ops amd_iommu_ops;
 
+static struct dma_map_ops amd_iommu_dma_ops;
+
 /*
  * general struct to manage commands send to an IOMMU
  */
@@ -1863,6 +1865,11 @@ static int device_change_notifier(struct notifier_block *nb,
 
                iommu_init_device(dev);
 
+               if (iommu_pass_through) {
+                       attach_device(dev, pt_domain);
+                       break;
+               }
+
                domain = domain_for_device(dev);
 
                /* allocate a protection domain if a device is added */
@@ -1878,6 +1885,8 @@ static int device_change_notifier(struct notifier_block *nb,
                list_add_tail(&dma_domain->list, &iommu_pd_list);
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 
+               dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
                break;
        case BUS_NOTIFY_DEL_DEVICE:
 
index 6269eb0..ef2d493 100644 (file)
@@ -1468,6 +1468,8 @@ static int __init amd_iommu_init(void)
 
        register_syscore_ops(&amd_iommu_syscore_ops);
 
+       x86_platform.iommu_shutdown = disable_iommus;
+
        if (iommu_pass_through)
                goto out;
 
@@ -1476,7 +1478,6 @@ static int __init amd_iommu_init(void)
        else
                printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
 
-       x86_platform.iommu_shutdown = disable_iommus;
 out:
        return ret;
 
index 9bfd057..dae2b7a 100644 (file)
@@ -1080,6 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->split_io = dm_rh_get_region_size(ms->rh);
        ti->num_flush_requests = 1;
        ti->num_discard_requests = 1;
+       ti->discard_zeroes_data_unsupported = 1;
 
        ms->kmirrord_wq = alloc_workqueue("kmirrord",
                                          WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1210,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE) {
-               if (!(bio->bi_rw & REQ_FLUSH))
+               if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
                        dm_rh_dec(ms->rh, map_context->ll);
                return error;
        }
index 7771ed2..69732e0 100644 (file)
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
                return;
        }
 
+       if (bio->bi_rw & REQ_DISCARD)
+               return;
+
        /* We must inform the log that the sync count has changed. */
        log->type->set_region_sync(log, region, 0);
 
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
        struct bio *bio;
 
        for (bio = bios->head; bio; bio = bio->bi_next) {
-               if (bio->bi_rw & REQ_FLUSH)
+               if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
index da2f021..532a902 100644 (file)
@@ -288,8 +288,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
 
        hlist_del(&cell->list);
 
-       bio_list_add(inmates, cell->holder);
-       bio_list_merge(inmates, &cell->bios);
+       if (inmates) {
+               bio_list_add(inmates, cell->holder);
+               bio_list_merge(inmates, &cell->bios);
+       }
 
        mempool_free(cell, prison->cell_pool);
 }
@@ -312,9 +314,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  */
 static void __cell_release_singleton(struct cell *cell, struct bio *bio)
 {
-       hlist_del(&cell->list);
        BUG_ON(cell->holder != bio);
        BUG_ON(!bio_list_empty(&cell->bios));
+
+       __cell_release(cell, NULL);
 }
 
 static void cell_release_singleton(struct cell *cell, struct bio *bio)
index 700ecae..d8646d7 100644 (file)
@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
        return sprintf(page, "%s\n", array_states[st]);
 }
 
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
 static int do_md_run(struct mddev * mddev);
 static int restart_array(struct mddev *mddev);
 
@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                /* stopping an active array */
                if (atomic_read(&mddev->openers) > 0)
                        return -EBUSY;
-               err = do_md_stop(mddev, 0, 0);
+               err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers) {
                        if (atomic_read(&mddev->openers) > 0)
                                return -EBUSY;
-                       err = do_md_stop(mddev, 2, 0);
+                       err = do_md_stop(mddev, 2, NULL);
                } else
                        err = 0; /* already inactive */
                break;
@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
-                       err = md_set_readonly(mddev, 0);
+                       err = md_set_readonly(mddev, NULL);
                else {
                        mddev->ro = 1;
                        set_disk_ro(mddev->gendisk, 1);
@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        case read_auto:
                if (mddev->pers) {
                        if (mddev->ro == 0)
-                               err = md_set_readonly(mddev, 0);
+                               err = md_set_readonly(mddev, NULL);
                        else if (mddev->ro == 1)
                                err = restart_array(mddev);
                        if (err == 0) {
@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop);
 
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 {
        int err = 0;
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open) {
+       if (atomic_read(&mddev->openers) > !!bdev) {
                printk("md: %s still in use.\n",mdname(mddev));
                err = -EBUSY;
                goto out;
        }
+       if (bdev)
+               sync_blockdev(bdev);
        if (mddev->pers) {
                __md_stop_writes(mddev);
 
@@ -5108,18 +5110,26 @@ out:
  *   0 - completely stop and dis-assemble array
  *   2 - stop but do not disassemble array
  */
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+                     struct block_device *bdev)
 {
        struct gendisk *disk = mddev->gendisk;
        struct md_rdev *rdev;
 
        mutex_lock(&mddev->open_mutex);
-       if (atomic_read(&mddev->openers) > is_open ||
+       if (atomic_read(&mddev->openers) > !!bdev ||
            mddev->sysfs_active) {
                printk("md: %s still in use.\n",mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
                return -EBUSY;
        }
+       if (bdev)
+               /* It is possible IO was issued on some other
+                * open file which was closed before we took ->open_mutex.
+                * As that was not the last close, __blkdev_put will not
+                * have called sync_blockdev, so we must.
+                */
+               sync_blockdev(bdev);
 
        if (mddev->pers) {
                if (mddev->ro)
@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
        err = do_md_run(mddev);
        if (err) {
                printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
-               do_md_stop(mddev, 0, 0);
+               do_md_stop(mddev, 0, NULL);
        }
 }
 
@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
                        goto done_unlock;
 
                case STOP_ARRAY:
-                       err = do_md_stop(mddev, 0, 1);
+                       err = do_md_stop(mddev, 0, bdev);
                        goto done_unlock;
 
                case STOP_ARRAY_RO:
-                       err = md_set_readonly(mddev, 1);
+                       err = md_set_readonly(mddev, bdev);
                        goto done_unlock;
 
                case BLKROSET:
index 50ed53b..fc90c11 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/device-mapper.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
 
        ca->nr = nr_blocks;
        ca->nr_free = nr_blocks;
-       ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
-       if (!ca->counts)
-               return -ENOMEM;
+
+       if (!nr_blocks)
+               ca->counts = NULL;
+       else {
+               ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+               if (!ca->counts)
+                       return -ENOMEM;
+       }
 
        return 0;
 }
 
+static void ca_destroy(struct count_array *ca)
+{
+       vfree(ca->counts);
+}
+
 static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 {
        int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
 static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
 {
        dm_block_t nr_blocks = ca->nr + extra_blocks;
-       uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+       uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
        if (!counts)
                return -ENOMEM;
 
-       memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
-       kfree(ca->counts);
+       if (ca->counts) {
+               memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+               ca_destroy(ca);
+       }
        ca->nr = nr_blocks;
        ca->nr_free += extra_blocks;
        ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
        return 0;
 }
 
-static void ca_destroy(struct count_array *ca)
-{
-       kfree(ca->counts);
-}
-
 /*----------------------------------------------------------------*/
 
 struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
                ca_destroy(&smc->counts);
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
        int r;
        struct sm_checker *smc;
 
-       if (!sm)
-               return NULL;
+       if (IS_ERR_OR_NULL(sm))
+               return ERR_PTR(-EINVAL);
 
        smc = kmalloc(sizeof(*smc), GFP_KERNEL);
        if (!smc)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        memcpy(&smc->sm, &ops_, sizeof(smc->sm));
        r = ca_create(&smc->old_counts, sm);
        if (r) {
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        r = ca_create(&smc->counts, sm);
        if (r) {
                ca_destroy(&smc->old_counts);
                kfree(smc);
-               return NULL;
+               return ERR_PTR(r);
        }
 
        smc->real_sm = sm;
index fc469ba..3d0ed53 100644 (file)
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
                                       dm_block_t nr_blocks)
 {
        struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
-       return dm_sm_checker_create_fresh(sm);
+       struct dm_space_map *smc;
+
+       if (IS_ERR_OR_NULL(sm))
+               return sm;
+
+       smc = dm_sm_checker_create_fresh(sm);
+       if (IS_ERR(smc))
+               dm_sm_destroy(sm);
+
+       return smc;
 }
 EXPORT_SYMBOL_GPL(dm_sm_disk_create);
 
index 6f8d387..ba54aac 100644 (file)
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+       if (!tm->is_clone)
+               wipe_shadow_table(tm);
+
        kfree(tm);
 }
 EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -342,8 +345,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
 
        } else {
                r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -362,8 +367,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
                }
 
                *sm = dm_sm_checker_create(inner);
-               if (!*sm)
+               if (IS_ERR(*sm)) {
+                       r = PTR_ERR(*sm);
                        goto bad2;
+               }
        }
 
        return 0;
index 7af60ec..2d97bf0 100644 (file)
@@ -1713,8 +1713,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 
        if (atomic_dec_and_test(&r1_bio->remaining)) {
                /* if we're here, all write(s) have completed, so clean up */
-               md_done_sync(mddev, r1_bio->sectors, 1);
-               put_buf(r1_bio);
+               int s = r1_bio->sectors;
+               if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+                   test_bit(R1BIO_WriteError, &r1_bio->state))
+                       reschedule_retry(r1_bio);
+               else {
+                       put_buf(r1_bio);
+                       md_done_sync(mddev, s, 1);
+               }
        }
 }
 
@@ -2378,9 +2384,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                atomic_set(&r1_bio->remaining, read_targets);
-               for (i=0; i<conf->raid_disks; i++) {
+               for (i = 0; i < conf->raid_disks && read_targets; i++) {
                        bio = r1_bio->bios[i];
                        if (bio->bi_end_io == end_sync_read) {
+                               read_targets--;
                                md_sync_acct(bio->bi_bdev, nr_sectors);
                                generic_make_request(bio);
                        }
index b219449..7a9eef6 100644 (file)
@@ -1919,7 +1919,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        if (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage, WRITE)
+                                            s, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -1956,7 +1956,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        switch (r10_sync_page_io(rdev,
                                             r10_bio->devs[sl].addr +
                                             sect,
-                                            s<<9, conf->tmppage,
+                                            s, conf->tmppage,
                                                 READ)) {
                        case 0:
                                /* Well, this device is dead */
@@ -2119,7 +2119,7 @@ read_more:
        rdev = conf->mirrors[mirror].rdev;
        printk_ratelimited(
                KERN_ERR
-               "md/raid10:%s: %s: redirecting"
+               "md/raid10:%s: %s: redirecting "
                "sector %llu to another mirror\n",
                mdname(mddev),
                bdevname(rdev->bdev, b),
@@ -2436,6 +2436,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
+                       if (sect >= mddev->resync_max_sectors) {
+                               /* last stripe is not complete - don't
+                                * try to recover this sector.
+                                */
+                               continue;
+                       }
                        /* Unless we are doing a full sync, we only need
                         * to recover the block if it is set in the bitmap
                         */
index 858fdbb..26ef63a 100644 (file)
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes)==0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
-                       if (test_bit(STRIPE_DELAYED, &sh->state))
+                       if (test_bit(STRIPE_DELAYED, &sh->state) &&
+                           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                   sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
+                               clear_bit(STRIPE_DELAYED, &sh->state);
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
@@ -542,6 +544,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                                         * a chance*/
                                        md_check_recovery(conf->mddev);
                                }
+                               /*
+                                * Because md_wait_for_blocked_rdev
+                                * will dec nr_pending, we must
+                                * increment it first.
+                                */
+                               atomic_inc(&rdev->nr_pending);
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
@@ -3621,7 +3629,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                raid_bio->bi_next = (void*)rdev;
                align_bi->bi_bdev =  rdev->bdev;
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-               align_bi->bi_sector += rdev->data_offset;
 
                if (!bio_fits_rdev(align_bi) ||
                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3632,6 +3639,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                        return 0;
                }
 
+               /* No reshape active, so we can trust rdev->data_offset */
+               align_bi->bi_sector += rdev->data_offset;
+
                spin_lock_irq(&conf->device_lock);
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0,
index f732877..d5cda35 100644 (file)
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        if (minor == MAX_DVB_MINORS) {
                kfree(dvbdevfops);
                kfree(dvbdev);
+               up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
                return -EINVAL;
        }
index b7d1e3e..fb68805 100644 (file)
@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { USB_DEVICE(0x2040, 0xc0a0),
                .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+       { USB_DEVICE(0x2040, 0xf5a0),
+               .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
        { } /* Terminating entry */
        };
 
index a7fa38f..e572ce5 100644 (file)
@@ -914,9 +914,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
        list_add_tail(&dev->devlist, &cx25821_devlist);
        mutex_unlock(&cx25821_devlist_mutex);
 
-       strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
-       strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
        if (dev->pci->device != 0x8210) {
                pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
                        __func__, dev->pci->device);
index 2d2d009..bf54360 100644 (file)
@@ -187,7 +187,7 @@ enum port {
 };
 
 struct cx25821_board {
-       char *name;
+       const char *name;
        enum port porta;
        enum port portb;
        enum port portc;
index 2ca10df..981501f 100644 (file)
@@ -1697,7 +1697,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type buf_type)
 {
        struct gspca_dev *gspca_dev = priv;
-       int ret;
+       int i, ret;
 
        if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                return -EINVAL;
@@ -1728,6 +1728,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
        wake_up_interruptible(&gspca_dev->wq);
 
        /* empty the transfer queues */
+       for (i = 0; i < gspca_dev->nframes; i++)
+               gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
        atomic_set(&gspca_dev->fr_q, 0);
        atomic_set(&gspca_dev->fr_i, 0);
        gspca_dev->fr_o = 0;
index 6878a94..83b51b5 100644 (file)
@@ -148,6 +148,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
 static const struct sdhci_pci_fixes sdhci_cafe = {
        .quirks         = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
                          SDHCI_QUIRK_NO_BUSY_IRQ |
+                         SDHCI_QUIRK_BROKEN_CARD_DETECTION |
                          SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
 };
 
index 72d3f23..68ecf48 100644 (file)
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
 static int cafe_device_ready(struct mtd_info *mtd)
 {
        struct cafe_priv *cafe = mtd->priv;
-       int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+       int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
        uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
 
        cafe_writel(cafe, irqs, NAND_IRQ);
index 34c03be..83e8e1b 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -547,12 +547,6 @@ static char *get_partition_name(int i)
        return kstrdup(buf, GFP_KERNEL);
 }
 
-static uint64_t divide(uint64_t n, uint32_t d)
-{
-       do_div(n, d);
-       return n;
-}
-
 /*
  * Initialize the nandsim structure.
  *
@@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
        ns->geom.oobsz    = mtd->oobsize;
        ns->geom.secsz    = mtd->erasesize;
        ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-       ns->geom.pgnum    = divide(ns->geom.totsz, ns->geom.pgsz);
+       ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
        ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
        ns->geom.secshift = ffs(ns->geom.secsz) - 1;
        ns->geom.pgshift  = chip->page_shift;
@@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
 
        if (!rptwear)
                return 0;
-       wear_eb_count = divide(mtd->size, mtd->erasesize);
+       wear_eb_count = div_u64(mtd->size, mtd->erasesize);
        mem = wear_eb_count * sizeof(unsigned long);
        if (mem / sizeof(unsigned long) != wear_eb_count) {
                NS_ERR("Too many erase blocks for wear reporting\n");
index 3680aa2..2cf084e 100644 (file)
@@ -6,7 +6,7 @@
 #include "bonding.h"
 #include "bond_alb.h"
 
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
index f65e0b9..6c284d1 100644 (file)
@@ -77,6 +77,7 @@
 #include <net/route.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <net/pkt_sched.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -382,8 +383,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
        return next;
 }
 
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -396,7 +395,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 {
        skb->dev = slave_dev;
 
-       skb->queue_mapping = bond_queue_mapping(skb);
+       BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+                    sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+       skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
 
        if (unlikely(netpoll_tx_running(slave_dev)))
                bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -3183,6 +3184,12 @@ static int bond_master_netdev_event(unsigned long event,
        switch (event) {
        case NETDEV_CHANGENAME:
                return bond_event_changename(event_bond);
+       case NETDEV_UNREGISTER:
+               bond_remove_proc_entry(event_bond);
+               break;
+       case NETDEV_REGISTER:
+               bond_create_proc_entry(event_bond);
+               break;
        default:
                break;
        }
@@ -4151,7 +4158,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
        /*
         * Save the original txq to restore before passing to the driver
         */
-       bond_queue_mapping(skb) = skb->queue_mapping;
+       qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
 
        if (unlikely(txq >= dev->real_num_tx_queues)) {
                do {
@@ -4390,8 +4397,6 @@ static void bond_uninit(struct net_device *bond_dev)
 
        bond_work_cancel_all(bond);
 
-       bond_remove_proc_entry(bond);
-
        bond_debug_unregister(bond);
 
        __hw_addr_flush(&bond->mc_list);
@@ -4793,7 +4798,6 @@ static int bond_init(struct net_device *bond_dev)
 
        bond_set_lockdep_class(bond_dev);
 
-       bond_create_proc_entry(bond);
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
        bond_prepare_sysfs_group(bond);
index 8dc84d6..86cd532 100644 (file)
@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
        priv->write_reg(priv, &priv->regs->control,
                        CONTROL_ENABLE_AR);
 
-       if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
-                                       CAN_CTRLMODE_LOOPBACK)) {
+       if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+           (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
                /* loopback + silent mode : useful for hot self-test */
                priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
                                CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
index e023379..e59d006 100644 (file)
@@ -933,12 +933,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
        u32 clock_freq = 0;
 
        if (pdev->dev.of_node) {
-               const u32 *clock_freq_p;
+               const __be32 *clock_freq_p;
 
                clock_freq_p = of_get_property(pdev->dev.of_node,
                                                "clock-frequency", NULL);
                if (clock_freq_p)
-                       clock_freq = *clock_freq_p;
+                       clock_freq = be32_to_cpup(clock_freq_p);
        }
 
        if (!clock_freq) {
index eeac9ca..68fe73c 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/rtnetlink.h>
 #include <net/rtnetlink.h>
 #include <linux/u64_stats_sync.h>
+#include <linux/sched.h>
 
 static int numdummies = 1;
 
@@ -186,8 +187,10 @@ static int __init dummy_init_module(void)
        rtnl_lock();
        err = __rtnl_link_register(&dummy_link_ops);
 
-       for (i = 0; i < numdummies && !err; i++)
+       for (i = 0; i < numdummies && !err; i++) {
                err = dummy_init_one();
+               cond_resched();
+       }
        if (err < 0)
                __rtnl_link_unregister(&dummy_link_ops);
        rtnl_unlock();
index eccdcff..5ae7df7 100644 (file)
@@ -267,7 +267,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
                                dev_warn(&pdev->dev, "stop mac failed\n");
                atl1c_set_aspm(hw, false);
                netif_carrier_off(netdev);
-               netif_stop_queue(netdev);
                atl1c_phy_reset(hw);
                atl1c_phy_init(&adapter->hw);
        } else {
index aec7212..8dda46a 100644 (file)
@@ -723,21 +723,6 @@ struct bnx2x_fastpath {
 
 #define ETH_RX_ERROR_FALGS             ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-                       (!((cqe)->fast_path_cqe.status_flags & \
-                          ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-                        ((cqe)->fast_path_cqe.type_error_flags & \
-                         ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-                       (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
                                (((le16_to_cpu(flags) & \
                                   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
index 580b44e..2c1a5c0 100644 (file)
@@ -220,7 +220,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
-                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+                   (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
                        netif_tx_wake_queue(txq);
 
                __netif_tx_unlock(txq);
@@ -551,6 +551,26 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
                le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+                               struct bnx2x_fastpath *fp)
+{
+       /* Do nothing if no IP/L4 csum validation was done */
+
+       if (cqe->fast_path_cqe.status_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+               return;
+
+       /* If both IP/L4 validation were done, check if an error was found. */
+
+       if (cqe->fast_path_cqe.type_error_flags &
+           (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+            ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+               fp->eth_q_stats.hw_csum_err++;
+       else
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
        struct bnx2x *bp = fp->bp;
@@ -746,13 +766,9 @@ reuse_rx:
 
                        skb_checksum_none_assert(skb);
 
-                       if (bp->dev->features & NETIF_F_RXCSUM) {
+                       if (bp->dev->features & NETIF_F_RXCSUM)
+                               bnx2x_csum_validate(skb, cqe, fp);
 
-                               if (likely(BNX2X_RX_CSUM_OK(cqe)))
-                                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               else
-                                       fp->eth_q_stats.hw_csum_err++;
-                       }
                }
 
                skb_record_rx_queue(skb, fp->index);
@@ -2238,8 +2254,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fp_txdata *txdata,
@@ -2890,7 +2904,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        txdata->tx_bd_prod += nbd;
 
-       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+       if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
                netif_tx_stop_queue(txq);
 
                /* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -2899,7 +2913,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                smp_mb();
 
                fp->eth_q_stats.driver_xoff++;
-               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+               if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
                        netif_tx_wake_queue(txq);
        }
        txdata->tx_pkt++;
index 2dcac28..6b258d9 100644 (file)
@@ -14046,7 +14046,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                }
        }
 
-       if (tg3_flag(tp, 5755_PLUS))
+       if (tg3_flag(tp, 5755_PLUS) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
index bf266a0..36c7c4e 100644 (file)
@@ -696,6 +696,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
+               int gso_segs = skb_shinfo(skb)->gso_segs;
+
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;
@@ -713,8 +715,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
                be_txq_notify(adapter, txq->id, wrb_cnt);
 
-               be_tx_stats_update(txo, wrb_cnt, copied,
-                               skb_shinfo(skb)->gso_segs, stopped);
+               be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
index a3e65fd..3072d35 100644 (file)
@@ -1571,6 +1571,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
        ctrl = er32(CTRL);
        status = er32(STATUS);
        rxcw = er32(RXCW);
+       /* SYNCH bit and IV bit are sticky */
+       udelay(10);
+       rxcw = er32(RXCW);
 
        if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
 
@@ -2080,8 +2083,9 @@ const struct e1000_info e1000_82574_info = {
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
-       .flags2                   = FLAG2_CHECK_PHY_HANG
+       .flags2                 = FLAG2_CHECK_PHY_HANG
                                  | FLAG2_DISABLE_ASPM_L0S
+                                 | FLAG2_DISABLE_ASPM_L1
                                  | FLAG2_NO_DISABLE_RX,
        .pba                    = 32,
        .max_hw_frame_size      = DEFAULT_JUMBO,
index 4e933d1..64d3f98 100644 (file)
@@ -5132,14 +5132,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                return -EINVAL;
        }
 
-       /* 82573 Errata 17 */
-       if (((adapter->hw.mac.type == e1000_82573) ||
-            (adapter->hw.mac.type == e1000_82574)) &&
-           (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
-               adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
-               e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
-       }
-
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
        /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
index 65c51ff..11ddd83 100644 (file)
@@ -4361,10 +4361,12 @@ static int sky2_set_features(struct net_device *dev, u32 features)
        struct sky2_port *sky2 = netdev_priv(dev);
        u32 changed = dev->features ^ features;
 
-       if (changed & NETIF_F_RXCSUM) {
-               u32 on = features & NETIF_F_RXCSUM;
-               sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-                            on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+       if ((changed & NETIF_F_RXCSUM) &&
+           !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+               sky2_write32(sky2->hw,
+                            Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+                            (features & NETIF_F_RXCSUM)
+                            ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
        }
 
        if (changed & NETIF_F_RXHASH)
index cc2565c..ed1be8a 100644 (file)
@@ -3770,6 +3770,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_22:
        case RTL_GIGA_MAC_VER_23:
        case RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_34:
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        default:
@@ -4185,6 +4186,7 @@ out:
        return rc;
 
 err_out_msi_4:
+       netif_napi_del(&tp->napi);
        rtl_disable_msi(pdev, tp);
        iounmap(ioaddr);
 err_out_free_res_3:
@@ -4210,6 +4212,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 
        cancel_delayed_work_sync(&tp->task);
 
+       netif_napi_del(&tp->napi);
+
        unregister_netdev(dev);
 
        rtl_release_firmware(tp);
index 72cd190..d4d2bc1 100644 (file)
@@ -1174,6 +1174,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
                wmb();
                priv->hw->desc->set_tx_owner(desc);
+               wmb();
        }
 
        /* Interrupt on completion only for the latest segment */
@@ -1189,6 +1190,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* To avoid race condition */
        priv->hw->desc->set_tx_owner(first);
+       wmb();
 
        priv->cur_tx++;
 
@@ -1252,6 +1254,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                }
                wmb();
                priv->hw->desc->set_rx_owner(p + entry);
+               wmb();
        }
 }
 
index 1b7082d..26106c0 100644 (file)
@@ -504,10 +504,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                if (copy > size) {
                        ++from;
                        --count;
-               }
+                       offset = 0;
+               } else
+                       offset += size;
                copy -= size;
                offset1 += size;
-               offset = 0;
        }
 
        if (len == offset1)
@@ -517,24 +518,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                struct page *page[MAX_SKB_FRAGS];
                int num_pages;
                unsigned long base;
+               unsigned long truesize;
 
-               len = from->iov_len - offset1;
+               len = from->iov_len - offset;
                if (!len) {
-                       offset1 = 0;
+                       offset = 0;
                        ++from;
                        continue;
                }
-               base = (unsigned long)from->iov_base + offset1;
+               base = (unsigned long)from->iov_base + offset;
                size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+               if (i + size > MAX_SKB_FRAGS)
+                       return -EMSGSIZE;
                num_pages = get_user_pages_fast(base, size, 0, &page[i]);
-               if ((num_pages != size) ||
-                   (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
-                       /* put_page is in skb free */
+               if (num_pages != size) {
+                       for (i = 0; i < num_pages; i++)
+                               put_page(page[i]);
                        return -EFAULT;
+               }
+               truesize = size * PAGE_SIZE;
                skb->data_len += len;
                skb->len += len;
-               skb->truesize += len;
-               atomic_add(len, &skb->sk->sk_wmem_alloc);
+               skb->truesize += truesize;
+               atomic_add(truesize, &skb->sk->sk_wmem_alloc);
                while (len) {
                        int off = base & ~PAGE_MASK;
                        int size = min_t(int, len, PAGE_SIZE - off);
@@ -545,7 +551,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
                        len -= size;
                        i++;
                }
-               offset1 = 0;
+               offset = 0;
                ++from;
        }
        return 0;
@@ -645,7 +651,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
-       int copylen;
+       int copylen = 0;
        bool zerocopy = false;
 
        if (q->flags & IFF_VNET_HDR) {
@@ -674,15 +680,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        if (unlikely(len < ETH_HLEN))
                goto err;
 
+       err = -EMSGSIZE;
+       if (unlikely(count > UIO_MAXIOV))
+               goto err;
+
        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
                zerocopy = true;
 
        if (zerocopy) {
+               /* Userspace may produce vectors with count greater than
+                * MAX_SKB_FRAGS, so we need to linearize parts of the skb
+                * so that the rest of the data fits in the frags.
+                */
+               if (count > MAX_SKB_FRAGS) {
+                       copylen = iov_length(iv, count - MAX_SKB_FRAGS);
+                       if (copylen < vnet_hdr_len)
+                               copylen = 0;
+                       else
+                               copylen -= vnet_hdr_len;
+               }
                /* There are 256 bytes to be copied in skb, so there is enough
                 * room for skb expand head in case it is used.
                 * The rest of the buffer is mapped from userspace.
                 */
-               copylen = vnet_hdr.hdr_len;
+               if (copylen < vnet_hdr.hdr_len)
+                       copylen = vnet_hdr.hdr_len;
                if (!copylen)
                        copylen = GOODCOPY_LEN;
        } else
@@ -693,10 +715,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        if (!skb)
                goto err;
 
-       if (zerocopy) {
+       if (zerocopy)
                err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
-               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
-       } else
+       else
                err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
                                                   len);
        if (err)
@@ -715,8 +736,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        rcu_read_lock_bh();
        vlan = rcu_dereference_bh(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
-       if (zerocopy)
+       if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
+               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+       }
        if (vlan)
                macvlan_start_xmit(skb, vlan->dev);
        else
index ad96164..00ed9c1 100644 (file)
@@ -59,6 +59,7 @@
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
 #define USB_PRODUCT_IPHONE_4   0x1297
+#define USB_PRODUCT_IPAD 0x129a
 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
 #define USB_PRODUCT_IPHONE_4S  0x12a0
 
@@ -100,6 +101,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { USB_DEVICE_AND_INTERFACE_INFO(
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
index 0f9ee46..4cc4a8b 100644 (file)
@@ -143,6 +143,7 @@ struct ath_common {
        u32 keymax;
        DECLARE_BITMAP(keymap, ATH_KEYMAX);
        DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+       DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
        enum ath_crypt_caps crypt_caps;
 
        unsigned int clockrate;
index 8b0c2ca..7f97164 100644 (file)
@@ -557,7 +557,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
 
        if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
                if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
-                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+                   ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
                     !ah->is_pciexpress)) {
                        ah->config.serialize_regmode =
                                SER_REG_MODE_ON;
@@ -718,13 +718,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
 
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
 {
+       struct ath_common *common = ath9k_hw_common(ah);
+       int i = 0;
+
        REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
        udelay(100);
        REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
 
-       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+       while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
                udelay(100);
 
+               if (WARN_ON_ONCE(i >= 100)) {
+                       ath_err(common, "PLL4 measurement not done\n");
+                       break;
+               }
+
+               i++;
+       }
+
        return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
 }
 EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
index f76a814..95437fc 100644 (file)
@@ -1042,6 +1042,15 @@ void ath_hw_pll_work(struct work_struct *work)
                                            hw_pll_work.work);
        u32 pll_sqsum;
 
+       /*
+        * ensure that the PLL WAR is executed only
+        * after the STA is associated or if
+        * beaconing has started on interfaces that
+        * use beacons.
+        */
+       if (!(sc->sc_flags & SC_OP_BEACONS))
+               return;
+
        if (AR_SREV_9485(sc->sc_ah)) {
 
                ath9k_ps_wakeup(sc);
@@ -1486,15 +1495,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
-           ((vif->type == NL80211_IFTYPE_ADHOC) &&
-            sc->nvifs > 0)) {
-               ath_err(common, "Cannot create ADHOC interface when other"
-                       " interfaces already exist.\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
        ath_dbg(common, ATH_DBG_CONFIG,
                "Attach a VIF of type: %d\n", vif->type);
 
index 2f3aeac..e6d791c 100644 (file)
@@ -829,7 +829,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
         * descriptor does contain a valid key index. This has been observed
         * mostly with CCMP encryption.
         */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
                rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
 
        if (!rx_stats->rs_datalen)
index 76fd277..c59c592 100644 (file)
@@ -936,13 +936,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                }
 
                /* legacy rates */
+               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
                else
                        phy = WLAN_RC_PHY_OFDM;
 
-               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
                info->rates[i].Rate = rate->hw_value;
                if (rate->hw_value_short) {
                        if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
index 4cf7c5e..1ec3fa5 100644 (file)
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
                return -EIO;
 
        set_bit(idx, common->keymap);
+       if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+               set_bit(idx, common->ccmp_keymap);
+
        if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                set_bit(idx + 64, common->keymap);
                set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
                return;
 
        clear_bit(key->hw_key_idx, common->keymap);
+       clear_bit(key->hw_key_idx, common->ccmp_keymap);
        if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
                return;
 
index 833cbef..8a40ff9 100644 (file)
@@ -900,8 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
         */
        if (!(txs->status & TX_STATUS_AMPDU)
            && (txs->status & TX_STATUS_INTERMEDIATE)) {
-               wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
-                         __func__);
+               BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
                return false;
        }
 
diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
new file mode 100644 (file)
index 0000000..4007bf5
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
+ *
+ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __IPW_H__
+#define __IPW_H__
+
+#include <linux/ieee80211.h>
+
+static const u32 ipw_cipher_suites[] = {
+       WLAN_CIPHER_SUITE_WEP40,
+       WLAN_CIPHER_SUITE_WEP104,
+       WLAN_CIPHER_SUITE_TKIP,
+       WLAN_CIPHER_SUITE_CCMP,
+};
+
+#endif
index 127e9c6..10862d4 100644 (file)
@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
 #include <net/lib80211.h>
 
 #include "ipw2100.h"
+#include "ipw.h"
 
 #define IPW2100_VERSION "git-1.2.2"
 
@@ -1955,6 +1956,9 @@ static int ipw2100_wdev_init(struct net_device *dev)
                wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
        }
 
+       wdev->wiphy->cipher_suites = ipw_cipher_suites;
+       wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
+
        set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
        if (wiphy_register(wdev->wiphy)) {
                ipw2100_down(priv);
index 827889b..56bd370 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <net/cfg80211-wext.h>
 #include "ipw2200.h"
+#include "ipw.h"
 
 
 #ifndef KBUILD_EXTMOD
@@ -11535,6 +11536,9 @@ static int ipw_wdev_init(struct net_device *dev)
                wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
        }
 
+       wdev->wiphy->cipher_suites = ipw_cipher_suites;
+       wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
+
        set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
 
        /* With that information in place, we can now register the wiphy... */
index a262c23..0116ca8 100644 (file)
@@ -466,7 +466,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
                return 0;
        }
 
-       if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+       if (priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
                IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
                            keyconf->keyidx, key_flags);
                spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -483,7 +483,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
                                        sizeof(struct iwl4965_keyinfo));
        priv->stations[sta_id].sta.key.key_flags =
                        STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
-       priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+       priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
        priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
        priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
 
index 2bd5659..1bb64c9 100644 (file)
@@ -1884,14 +1884,12 @@ void iwl_legacy_bg_watchdog(unsigned long data)
                return;
 
        /* monitor and check for other stuck queues */
-       if (iwl_legacy_is_any_associated(priv)) {
-               for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-                       /* skip as we already checked the command queue */
-                       if (cnt == priv->cmd_queue)
-                               continue;
-                       if (iwl_legacy_check_stuck_queue(priv, cnt))
-                               return;
-               }
+       for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+               /* skip as we already checked the command queue */
+               if (cnt == priv->cmd_queue)
+                       continue;
+               if (iwl_legacy_check_stuck_queue(priv, cnt))
+                       return;
        }
 
        mod_timer(&priv->watchdog, jiffies +
index 5815cf5..4661a64 100644 (file)
@@ -1777,6 +1777,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 static ssize_t iwl_dbgfs_log_event_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
@@ -1814,6 +1815,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
 
        return count;
 }
+#endif
 
 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
                                        char __user *user_buf,
@@ -1941,7 +1943,9 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
        return ret;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
 DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
 DEBUGFS_READ_FILE_OPS(fh_reg);
 DEBUGFS_READ_FILE_OPS(rx_queue);
@@ -1957,7 +1961,9 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
 {
        DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
        DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
        DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
+#endif
        DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
        DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
        DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
index 7aa9aa0..39fd4d5 100644 (file)
@@ -267,7 +267,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        else
                last_seq = priv->rx_seq[tid];
 
-       if (last_seq >= new_node->start_win)
+       if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+           last_seq >= new_node->start_win)
                new_node->start_win = last_seq + 1;
 
        new_node->win_size = win_size;
@@ -611,5 +612,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
-       memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+       mwifiex_reset_11n_rx_seq_num(priv);
 }
index 033c8ad..7128baa 100644 (file)
 
 #define ADDBA_RSP_STATUS_ACCEPT 0
 
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM     0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+       memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
 int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
                               u16 seqNum,
                               u16 tid, u8 *ta,
index 462c710..727c129 100644 (file)
@@ -545,9 +545,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
 
        /*
         * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
-        * MCS index values for us are 0 to 7.
+        * MCS index values for us are 0 to 15.
         */
-       if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
+       if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
                sinfo->txrate.mcs = priv->tx_rate;
                sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
                /* 40MHz rate */
@@ -1177,11 +1177,11 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        void *mdev_priv;
 
        if (!priv)
-               return NULL;
+               return ERR_PTR(-EFAULT);
 
        adapter = priv->adapter;
        if (!adapter)
-               return NULL;
+               return ERR_PTR(-EFAULT);
 
        switch (type) {
        case NL80211_IFTYPE_UNSPECIFIED:
@@ -1190,7 +1190,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                if (priv->bss_mode) {
                        wiphy_err(wiphy, "cannot create multiple"
                                        " station/adhoc interfaces\n");
-                       return NULL;
+                       return ERR_PTR(-EINVAL);
                }
 
                if (type == NL80211_IFTYPE_UNSPECIFIED)
@@ -1208,14 +1208,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                break;
        default:
                wiphy_err(wiphy, "type not supported\n");
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
                              ether_setup, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
-               goto error;
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-ENOMEM);
        }
 
        dev_net_set(dev, wiphy_net(wiphy));
@@ -1240,7 +1241,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        /* Register network device */
        if (register_netdevice(dev)) {
                wiphy_err(wiphy, "cannot register virtual network device\n");
-               goto error;
+               free_netdev(dev);
+               priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+               return ERR_PTR(-EFAULT);
        }
 
        sema_init(&priv->async_sem, 1);
@@ -1252,12 +1255,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        mwifiex_dev_debugfs_init(priv);
 #endif
        return dev;
-error:
-       if (dev && (dev->reg_state == NETREG_UNREGISTERED))
-               free_netdev(dev);
-       priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
-       return NULL;
 }
 EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
 
index 6c239c3..06fcf1e 100644 (file)
@@ -406,6 +406,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
                priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
 
+               mwifiex_reset_11n_rx_seq_num(priv);
+
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -1209,10 +1211,12 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                return 0;
        }
 
-       if (!ptr->is_11n_enabled || mwifiex_is_ba_stream_setup(priv, ptr, tid)
-           || ((priv->sec_info.wpa_enabled
-                 || priv->sec_info.wpa2_enabled) && !priv->wpa_is_gtk_set)
-               ) {
+       if (!ptr->is_11n_enabled ||
+           mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+           priv->wps.session_enable ||
+           ((priv->sec_info.wpa_enabled ||
+             priv->sec_info.wpa2_enabled) &&
+            !priv->wpa_is_gtk_set)) {
                mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
                /* ra_list_spinlock has been freed in
                   mwifiex_send_single_packet() */
index 0ffa111..bdf960b 100644 (file)
@@ -876,6 +876,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x1482, 0x3c09) },
        /* AirTies */
        { USB_DEVICE(0x1eda, 0x2012) },
+       { USB_DEVICE(0x1eda, 0x2210) },
        { USB_DEVICE(0x1eda, 0x2310) },
        /* Allwin */
        { USB_DEVICE(0x8516, 0x2070) },
@@ -945,6 +946,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* DVICO */
        { USB_DEVICE(0x0fe9, 0xb307) },
        /* Edimax */
+       { USB_DEVICE(0x7392, 0x4085) },
        { USB_DEVICE(0x7392, 0x7711) },
        { USB_DEVICE(0x7392, 0x7717) },
        { USB_DEVICE(0x7392, 0x7718) },
@@ -1020,6 +1022,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Philips */
        { USB_DEVICE(0x0471, 0x200f) },
        /* Planex */
+       { USB_DEVICE(0x2019, 0x5201) },
        { USB_DEVICE(0x2019, 0xab25) },
        { USB_DEVICE(0x2019, 0xed06) },
        /* Quanta */
@@ -1088,6 +1091,12 @@ static struct usb_device_id rt2800usb_device_table[] = {
 #ifdef CONFIG_RT2800USB_RT33XX
        /* Belkin */
        { USB_DEVICE(0x050d, 0x945b) },
+       /* D-Link */
+       { USB_DEVICE(0x2001, 0x3c17) },
+       /* Panasonic */
+       { USB_DEVICE(0x083a, 0xb511) },
+       /* Philips */
+       { USB_DEVICE(0x0471, 0x20dd) },
        /* Ralink */
        { USB_DEVICE(0x148f, 0x3370) },
        { USB_DEVICE(0x148f, 0x8070) },
@@ -1099,6 +1108,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x8516, 0x3572) },
        /* Askey */
        { USB_DEVICE(0x1690, 0x0744) },
+       { USB_DEVICE(0x1690, 0x0761) },
+       { USB_DEVICE(0x1690, 0x0764) },
        /* Cisco */
        { USB_DEVICE(0x167b, 0x4001) },
        /* EnGenius */
@@ -1113,6 +1124,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Sitecom */
        { USB_DEVICE(0x0df6, 0x0041) },
        { USB_DEVICE(0x0df6, 0x0062) },
+       { USB_DEVICE(0x0df6, 0x0065) },
+       { USB_DEVICE(0x0df6, 0x0066) },
+       { USB_DEVICE(0x0df6, 0x0068) },
        /* Toshiba */
        { USB_DEVICE(0x0930, 0x0a07) },
        /* Zinwell */
@@ -1122,6 +1136,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Azurewave */
        { USB_DEVICE(0x13d3, 0x3329) },
        { USB_DEVICE(0x13d3, 0x3365) },
+       /* D-Link */
+       { USB_DEVICE(0x2001, 0x3c1c) },
+       { USB_DEVICE(0x2001, 0x3c1d) },
        /* Ralink */
        { USB_DEVICE(0x148f, 0x5370) },
        { USB_DEVICE(0x148f, 0x5372) },
@@ -1163,13 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* D-Link */
        { USB_DEVICE(0x07d1, 0x3c0b) },
        { USB_DEVICE(0x07d1, 0x3c17) },
-       { USB_DEVICE(0x2001, 0x3c17) },
-       /* Edimax */
-       { USB_DEVICE(0x7392, 0x4085) },
        /* Encore */
        { USB_DEVICE(0x203d, 0x14a1) },
-       /* Fujitsu Stylistic 550 */
-       { USB_DEVICE(0x1690, 0x0761) },
        /* Gemtek */
        { USB_DEVICE(0x15a9, 0x0010) },
        /* Gigabyte */
@@ -1190,7 +1202,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x05a6, 0x0101) },
        { USB_DEVICE(0x1d4d, 0x0010) },
        /* Planex */
-       { USB_DEVICE(0x2019, 0x5201) },
        { USB_DEVICE(0x2019, 0xab24) },
        /* Qcom */
        { USB_DEVICE(0x18e8, 0x6259) },
index 1e31050..ba28807 100644 (file)
@@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
        case QID_RX:
                if (!rt2x00queue_full(queue))
                        rt2x00queue_for_each_entry(queue,
-                                                  Q_INDEX_DONE,
                                                   Q_INDEX,
+                                                  Q_INDEX_DONE,
                                                   NULL,
                                                   rt2x00usb_kick_rx_entry);
                break;
index 2e0de2f..c2d5b49 100644 (file)
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
                        radio_on = true;
                } else if (radio_on) {
                        radio_on = false;
-                       cancel_delayed_work_sync(&priv->led_on);
+                       cancel_delayed_work(&priv->led_on);
                        ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
                }
        } else if (radio_on) {
index 94a3e17..0302148 100644 (file)
@@ -311,9 +311,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+       {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
        {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
        {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+       {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
        /* HP - Lite-On ,8188CUS Slim Combo */
        {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
        {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -355,6 +357,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
        {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
        {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+       {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
        {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
        {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
        {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
index 2cf4c5f..de9faa9 100644 (file)
@@ -3462,21 +3462,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
        switch (rtlhal->macphymode) {
        case DUALMAC_SINGLEPHY:
                rtlphy->rf_type = RF_2T2R;
-               rtlhal->version |= CHIP_92D_SINGLEPHY;
+               rtlhal->version |= RF_TYPE_2T2R;
                rtlhal->bandset = BAND_ON_BOTH;
                rtlhal->current_bandtype = BAND_ON_2_4G;
                break;
 
        case SINGLEMAC_SINGLEPHY:
                rtlphy->rf_type = RF_2T2R;
-               rtlhal->version |= CHIP_92D_SINGLEPHY;
+               rtlhal->version |= RF_TYPE_2T2R;
                rtlhal->bandset = BAND_ON_BOTH;
                rtlhal->current_bandtype = BAND_ON_2_4G;
                break;
 
        case DUALMAC_DUALPHY:
                rtlphy->rf_type = RF_1T1R;
-               rtlhal->version &= (~CHIP_92D_SINGLEPHY);
+               rtlhal->version &= RF_TYPE_1T1R;
                /* Now we let MAC0 run on 5G band. */
                if (rtlhal->interfaceindex == 0) {
                        rtlhal->bandset = BAND_ON_5G;
index 226faab..fc35308 100644 (file)
@@ -1922,14 +1922,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
-       unregister_netdev(info->netdev);
-
        xennet_disconnect_backend(info);
 
-       del_timer_sync(&info->rx_refill_timer);
-
        xennet_sysfs_delif(info->netdev);
 
+       unregister_netdev(info->netdev);
+
+       del_timer_sync(&info->rx_refill_timer);
+
        free_percpu(info->stats);
 
        free_netdev(info->netdev);
index da14432..efc4b7f 100644 (file)
@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
 static DEFINE_MUTEX(oprofile_perf_mutex);
 
 static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
 static int num_counters;
 
 /*
index 12d1e81..d024f83 100644 (file)
@@ -742,6 +742,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
        pci_pm_set_unknown_state(pci_dev);
 
+       /*
+        * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+        * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+        * hasn't been quiesced and tries to turn it off.  If the controller
+        * is already in D3, this can hang or cause memory corruption.
+        *
+        * Since the value of the COMMAND register doesn't matter once the
+        * device has been suspended, we can safely set it to 0 here.
+        */
+       if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+               pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
        return 0;
 }
 
index e5b75eb..6d4a531 100644 (file)
@@ -1689,11 +1689,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
        if (target_state == PCI_POWER_ERROR)
                return -EIO;
 
-       /* Some devices mustn't be in D3 during system sleep */
-       if (target_state == PCI_D3hot &&
-                       (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
-               return 0;
-
        pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
        error = pci_set_power_state(dev, target_state);
index 3c56fec..78fda9c 100644 (file)
@@ -2940,32 +2940,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
-/*
- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
- * ASUS motherboards will cause memory corruption or a system crash
- * if they are in D3 while the system is put into S3 sleep.
- */
-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
-{
-       const char *sys_info;
-       static const char good_Asus_board[] = "P8Z68-V";
-
-       if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
-               return;
-       if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
-               return;
-       sys_info = dmi_get_system_info(DMI_BOARD_NAME);
-       if (sys_info && memcmp(sys_info, good_Asus_board,
-                       sizeof(good_Asus_board) - 1) == 0)
-               return;
-
-       dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
-       dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
-       device_set_wakeup_capable(&dev->dev, false);
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
-
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
                          struct pci_fixup *end)
 {
index 809a3ae..b46ec11 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/string.h>
 #include <linux/tick.h>
 #include <linux/timer.h>
+#include <linux/dmi.h>
 #include <drm/i915_drm.h>
 #include <asm/msr.h>
 #include <asm/processor.h>
@@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, ips_id_table);
 
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+       pr_info("Blacklisted intel_ips for %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+       {
+               .callback = ips_blacklist_callback,
+               .ident = "HP ProBook",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+               },
+       },
+       { }     /* terminating entry */
+};
+
 static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        u64 platform_info;
@@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
        u16 htshi, trc, trc_required_mask;
        u8 tse;
 
+       if (dmi_check_system(ips_blacklist))
+               return -ENODEV;
+
        ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
        if (!ips)
                return -ENOMEM;
index 09e26bf..af1e296 100644 (file)
@@ -540,245 +540,34 @@ static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
                   get_performance_level, set_performance_level);
 
 
-static int __init dmi_check_cb(const struct dmi_system_id *id)
-{
-       pr_info("found laptop model '%s'\n",
-               id->ident);
-       return 1;
-}
-
 static struct dmi_system_id __initdata samsung_dmi_table[] = {
        {
-               .ident = "N128",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N128"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N130",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                        "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N130"),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
                },
-               .callback = dmi_check_cb,
        },
        {
-               .ident = "N510",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                        "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N510"),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
                },
-               .callback = dmi_check_cb,
        },
        {
-               .ident = "X125",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                        "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X125"),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
                },
-               .callback = dmi_check_cb,
        },
        {
-               .ident = "X120/X170",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR,
                                        "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NC10",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NC10"),
-               },
-               .callback = dmi_check_cb,
-       },
-               {
-               .ident = "NP-Q45",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
-                       DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
-               },
-               .callback = dmi_check_cb,
-               },
-       {
-               .ident = "X360",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X360"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R410 Plus",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R460"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R518",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R518"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R519/R719",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150/N210/N220",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N220",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N220"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N220"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150/N210/N220/N230",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N150P/N210P/N220P",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R700",
-               .matches = {
-                     DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                     DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
-                     DMI_MATCH(DMI_BOARD_NAME, "SR700"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R530/R730",
-               .matches = {
-                     DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                     DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
-                     DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NF110/NF210/NF310",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "N145P/N250P/N260P",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
-                       DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R70/R71",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR,
-                                       "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "P460",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
-                       DMI_MATCH(DMI_BOARD_NAME, "P460"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "R528/R728",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
-                       DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
-               },
-               .callback = dmi_check_cb,
-       },
-       {
-               .ident = "NC210/NC110",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
-                       DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
-               },
-               .callback = dmi_check_cb,
-       },
-               {
-               .ident = "X520",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
-                       DMI_MATCH(DMI_BOARD_NAME, "X520"),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
                },
-               .callback = dmi_check_cb,
        },
        { },
 };
@@ -819,7 +608,8 @@ static int __init samsung_init(void)
 
        f0000_segment = ioremap_nocache(0xf0000, 0xffff);
        if (!f0000_segment) {
-               pr_err("Can't map the segment at 0xf0000\n");
+               if (debug || force)
+                       pr_err("Can't map the segment at 0xf0000\n");
                return -EINVAL;
        }
 
@@ -832,7 +622,8 @@ static int __init samsung_init(void)
        }
 
        if (loca == 0xffff) {
-               pr_err("This computer does not support SABI\n");
+               if (debug || force)
+                       pr_err("This computer does not support SABI\n");
                goto error_no_signature;
        }
 
index 39e41fb..5160354 100644 (file)
@@ -191,10 +191,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
        struct platform_device *pdev = dev_id;
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
        void __iomem *ioaddr = pdata->ioaddr;
+       unsigned long flags;
        u32 status;
        u32 events = 0;
 
-       spin_lock_irq(&pdata->rtc->irq_lock);
+       spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
        status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
        /* clear interrupt sources */
        writew(status, ioaddr + RTC_RTCISR);
@@ -217,7 +218,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
                rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
 
        rtc_update_irq(pdata->rtc, 1, events);
-       spin_unlock_irq(&pdata->rtc->irq_lock);
+       spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
 
        return IRQ_HANDLED;
 }
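
The rtc-mxc hunk above swaps spin_lock_irq()/spin_unlock_irq() for the irqsave variants so the handler restores the caller's interrupt state instead of unconditionally re-enabling IRQs on exit. A minimal sketch of that idiom; the driver struct and handler names are hypothetical:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_priv {                   /* hypothetical per-device data */
        spinlock_t lock;
};

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;
        unsigned long flags;

        /* Save the caller's interrupt state and take the lock... */
        spin_lock_irqsave(&priv->lock, flags);
        /* ...touch whatever state the lock protects... */
        spin_unlock_irqrestore(&priv->lock, flags);
        /* ...then restore exactly that state, instead of blindly re-enabling
         * interrupts the way spin_unlock_irq() does.
         */
        return IRQ_HANDLED;
}
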
index 532d212..393e7ce 100644 (file)
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
 
                if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
                        resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
-                       memcpy(&resp->ending_fis[0], r+16, 24);
+                       memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
                        ts->buf_valid_size = sizeof(*resp);
                }
        }
index 351dc0b..ee77a58 100644 (file)
@@ -287,6 +287,7 @@ static void scsi_host_dev_release(struct device *dev)
        struct Scsi_Host *shost = dev_to_shost(dev);
        struct device *parent = dev->parent;
        struct request_queue *q;
+       void *queuedata;
 
        scsi_proc_hostdir_rm(shost->hostt);
 
@@ -296,9 +297,9 @@ static void scsi_host_dev_release(struct device *dev)
                destroy_workqueue(shost->work_q);
        q = shost->uspace_req_q;
        if (q) {
-               kfree(q->queuedata);
-               q->queuedata = NULL;
-               scsi_free_queue(q);
+               queuedata = q->queuedata;
+               blk_cleanup_queue(q);
+               kfree(queuedata);
        }
 
        scsi_destroy_command_freelist(shost);
index db9238f..4868fc9 100644 (file)
@@ -112,12 +112,12 @@ static void sas_ata_task_done(struct sas_task *task)
        if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
            ((stat->stat == SAM_STAT_CHECK_CONDITION &&
              dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
-               ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+               memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
 
                if (!link->sactive) {
-                       qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                } else {
-                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+                       link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
                        if (unlikely(link->eh_info.err_mask))
                                qc->flags |= ATA_QCFLAG_FAILED;
                }
@@ -138,8 +138,8 @@ static void sas_ata_task_done(struct sas_task *task)
                                qc->flags |= ATA_QCFLAG_FAILED;
                        }
 
-                       dev->sata_dev.tf.feature = 0x04; /* status err */
-                       dev->sata_dev.tf.command = ATA_ERR;
+                       dev->sata_dev.fis[3] = 0x04; /* status err */
+                       dev->sata_dev.fis[2] = ATA_ERR;
                }
        }
 
@@ -252,7 +252,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
        struct domain_device *dev = qc->ap->private_data;
 
-       memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+       ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
        return true;
 }
 
index e48ba4b..dbe3568 100644 (file)
@@ -774,7 +774,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 }
 
 /* See if this phy is part of a wide port */
-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
 {
        struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
        int i;
@@ -790,11 +790,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
                        sas_port_add_phy(ephy->port, phy->phy);
                        phy->port = ephy->port;
                        phy->phy_state = PHY_DEVICE_DISCOVERED;
-                       return 0;
+                       return true;
                }
        }
 
-       return -ENODEV;
+       return false;
 }
 
 static struct domain_device *sas_ex_discover_expander(
@@ -932,8 +932,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                return res;
        }
 
-       res = sas_ex_join_wide_port(dev, phy_id);
-       if (!res) {
+       if (sas_ex_join_wide_port(dev, phy_id)) {
                SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
                            phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
                return res;
@@ -978,8 +977,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                        if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
                            SAS_ADDR(child->sas_addr)) {
                                ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
-                               res = sas_ex_join_wide_port(dev, i);
-                               if (!res)
+                               if (sas_ex_join_wide_port(dev, i))
                                        SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
                                                    i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
 
@@ -1849,32 +1847,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
 {
        struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
        struct domain_device *child;
-       bool found = false;
-       int res, i;
+       int res;
 
        SAS_DPRINTK("ex %016llx phy%d new device attached\n",
                    SAS_ADDR(dev->sas_addr), phy_id);
        res = sas_ex_phy_discover(dev, phy_id);
        if (res)
-               goto out;
-       /* to support the wide port inserted */
-       for (i = 0; i < dev->ex_dev.num_phys; i++) {
-               struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
-               if (i == phy_id)
-                       continue;
-               if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
-                   SAS_ADDR(ex_phy->attached_sas_addr)) {
-                       found = true;
-                       break;
-               }
-       }
-       if (found) {
-               sas_ex_join_wide_port(dev, phy_id);
+               return res;
+
+       if (sas_ex_join_wide_port(dev, phy_id))
                return 0;
-       }
+
        res = sas_ex_discover_devices(dev, phy_id);
-       if (!res)
-               goto out;
+       if (res)
+               return res;
        list_for_each_entry(child, &dev->ex_dev.children, siblings) {
                if (SAS_ADDR(child->sas_addr) ==
                    SAS_ADDR(ex_phy->attached_sas_addr)) {
@@ -1884,7 +1870,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
                        break;
                }
        }
-out:
        return res;
 }
 
@@ -1983,9 +1968,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
        struct domain_device *dev = NULL;
 
        res = sas_find_bcast_dev(port_dev, &dev);
-       if (res)
-               goto out;
-       if (dev) {
+       while (res == 0 && dev) {
                struct expander_device *ex = &dev->ex_dev;
                int i = 0, phy_id;
 
@@ -1997,8 +1980,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
                        res = sas_rediscover(dev, phy_id);
                        i = phy_id + 1;
                } while (i < ex->num_phys);
+
+               dev = NULL;
+               res = sas_find_bcast_dev(port_dev, &dev);
        }
-out:
        return res;
 }
 
index 2aeb2e9..831db24 100644 (file)
@@ -785,7 +785,13 @@ static void scsi_done(struct scsi_cmnd *cmd)
 /* Move this to a header if it becomes more generally useful */
 static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
-       return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+       struct scsi_driver **sdp;
+
+       sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
+       if (!sdp)
+               return NULL;
+
+       return *sdp;
 }
 
 /**
index dc6131e..456b131 100644 (file)
@@ -1673,6 +1673,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
         * requests are started.
         */
        scsi_run_host_queues(shost);
+
+       /*
+        * If eh is active and host_eh_scheduled is pending, we need to re-run
+        * recovery.  We do this check after scsi_run_host_queues() to allow
+        * everything pent up since the last eh run a chance to make forward
+        * progress before we sync again.  Either we'll immediately re-run
+        * recovery or scsi_device_unbusy() will wake us again when these
+        * pending commands complete.
+        */
+       spin_lock_irqsave(shost->host_lock, flags);
+       if (shost->host_eh_scheduled)
+               if (scsi_host_set_state(shost, SHOST_RECOVERY))
+                       WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
+       spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
 /**
index f0ab58e..6c4b620 100644 (file)
@@ -406,10 +406,6 @@ static void scsi_run_queue(struct request_queue *q)
        LIST_HEAD(starved_list);
        unsigned long flags;
 
-       /* if the device is dead, sdev will be NULL, so no queue to run */
-       if (!sdev)
-               return;
-
        shost = sdev->host;
        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);
@@ -483,15 +479,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
  */
 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 {
+       struct scsi_device *sdev = cmd->device;
        struct request *req = cmd->request;
        unsigned long flags;
 
+       /*
+        * We need to hold a reference on the device to avoid the queue being
+        * killed after the unlock and before scsi_run_queue() is invoked, which
+        * may happen because scsi_unprep_request() puts the command, which in
+        * turn releases its reference on the device.
+        */
+       get_device(&sdev->sdev_gendev);
+
        spin_lock_irqsave(q->queue_lock, flags);
        scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
        scsi_run_queue(q);
+
+       put_device(&sdev->sdev_gendev);
 }
 
 void scsi_next_command(struct scsi_cmnd *cmd)
@@ -1374,16 +1381,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
  * may be changed after request stacking drivers call the function,
  * regardless of taking lock or not.
  *
- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
- * (e.g. !sdev), scsi needs to return 'not busy'.
- * Otherwise, request stacking drivers may hold requests forever.
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
+ * needs to return 'not busy'. Otherwise, request stacking drivers
+ * may hold requests forever.
  */
 static int scsi_lld_busy(struct request_queue *q)
 {
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
 
-       if (!sdev)
+       if (blk_queue_dead(q))
                return 0;
 
        shost = sdev->host;
@@ -1494,12 +1501,6 @@ static void scsi_request_fn(struct request_queue *q)
        struct scsi_cmnd *cmd;
        struct request *req;
 
-       if (!sdev) {
-               while ((req = blk_peek_request(q)) != NULL)
-                       scsi_kill_request(req, q);
-               return;
-       }
-
        if(!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;
@@ -1701,20 +1702,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
        return q;
 }
 
-void scsi_free_queue(struct request_queue *q)
-{
-       unsigned long flags;
-
-       WARN_ON(q->queuedata);
-
-       /* cause scsi_request_fn() to kill all non-finished requests */
-       spin_lock_irqsave(q->queue_lock, flags);
-       q->request_fn(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-       blk_cleanup_queue(q);
-}
-
 /*
  * Function:    scsi_block_requests()
  *
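
The scsi_requeue_command() hunk above pins the scsi_device with get_device()/put_device() across the requeue so its queue cannot be torn down mid-operation. A stripped-down sketch of the same shape; do_requeue_work() is a hypothetical stand-in for the unprep/requeue/run sequence:

#include <linux/device.h>
#include <scsi/scsi_device.h>

/* Hypothetical stand-in for the unprep/requeue/run-queue sequence. */
static void do_requeue_work(struct scsi_device *sdev);

static void requeue_with_device_ref(struct scsi_device *sdev)
{
        /* Pin the device so its request queue cannot be torn down while we
         * still need it, even if the work below drops the last other
         * reference. */
        get_device(&sdev->sdev_gendev);

        do_requeue_work(sdev);

        /* Drop the temporary reference; teardown may now proceed. */
        put_device(&sdev->sdev_gendev);
}
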
index 5b475d0..d58adca 100644 (file)
@@ -85,7 +85,6 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
-extern void scsi_free_queue(struct request_queue *q);
 extern int scsi_init_queue(void);
 extern void scsi_exit_queue(void);
 struct request_queue;
index 6e7ea4a..a48b59c 100644 (file)
@@ -1710,6 +1710,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
 {
        struct scsi_device *sdev;
        shost_for_each_device(sdev, shost) {
+               /* target removed before the device could be added */
+               if (sdev->sdev_state == SDEV_DEL)
+                       continue;
                if (!scsi_host_scan_allowed(shost) ||
                    scsi_sysfs_add_sdev(sdev) != 0)
                        __scsi_remove_device(sdev);
index 04c2a27..bb7c482 100644 (file)
@@ -971,11 +971,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
                sdev->host->hostt->slave_destroy(sdev);
        transport_destroy_device(dev);
 
-       /* cause the request function to reject all I/O requests */
-       sdev->request_queue->queuedata = NULL;
-
        /* Freeing the queue signals to block that we're done */
-       scsi_free_queue(sdev->request_queue);
+       blk_cleanup_queue(sdev->request_queue);
        put_device(dev);
 }
 
@@ -1000,7 +997,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
        struct scsi_device *sdev;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       starget->reap_ref++;
  restart:
        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel != starget->channel ||
@@ -1014,14 +1010,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
                goto restart;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
-       scsi_target_reap(starget);
-}
-
-static int __remove_child (struct device * dev, void * data)
-{
-       if (scsi_is_target_device(dev))
-               __scsi_remove_target(to_scsi_target(dev));
-       return 0;
 }
 
 /**
@@ -1034,14 +1022,34 @@ static int __remove_child (struct device * dev, void * data)
  */
 void scsi_remove_target(struct device *dev)
 {
-       if (scsi_is_target_device(dev)) {
-               __scsi_remove_target(to_scsi_target(dev));
-               return;
+       struct Scsi_Host *shost = dev_to_shost(dev->parent);
+       struct scsi_target *starget, *found;
+       unsigned long flags;
+
+ restart:
+       found = NULL;
+       spin_lock_irqsave(shost->host_lock, flags);
+       list_for_each_entry(starget, &shost->__targets, siblings) {
+               if (starget->state == STARGET_DEL)
+                       continue;
+               if (starget->dev.parent == dev || &starget->dev == dev) {
+                       found = starget;
+                       found->reap_ref++;
+                       break;
+               }
        }
+       spin_unlock_irqrestore(shost->host_lock, flags);
 
-       get_device(dev);
-       device_for_each_child(dev, NULL, __remove_child);
-       put_device(dev);
+       if (found) {
+               __scsi_remove_target(found);
+               scsi_target_reap(found);
+               /* in the case where @dev has multiple starget children,
+                * continue removing.
+                *
+                * FIXME: does such a case exist?
+                */
+               goto restart;
+       }
 }
 EXPORT_SYMBOL(scsi_remove_target);
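
The reworked scsi_remove_target() above follows a common teardown pattern: find a candidate under the host lock, take a reference, drop the lock, do the removal, then rescan from the top. A generic sketch of that loop; all types and helpers here are hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical types and helpers, for illustration only. */
struct child_obj {
        struct list_head node;
        int being_removed;
};

struct parent_obj {
        spinlock_t lock;
        struct list_head children;
};

static void pin_child(struct child_obj *c);     /* grab a reference */
static void unpin_child(struct child_obj *c);   /* drop it again */
static void remove_child(struct child_obj *c);  /* heavy work, may sleep */

static void remove_all_children(struct parent_obj *parent)
{
        struct child_obj *c, *found;
        unsigned long flags;

restart:
        found = NULL;
        spin_lock_irqsave(&parent->lock, flags);
        list_for_each_entry(c, &parent->children, node) {
                if (c->being_removed)
                        continue;               /* already being torn down */
                found = c;
                pin_child(found);               /* keep it alive past the unlock */
                break;
        }
        spin_unlock_irqrestore(&parent->lock, flags);

        if (found) {
                remove_child(found);            /* done without the lock held */
                unpin_child(found);
                goto restart;                   /* list may have changed; rescan */
        }
}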
 
index 54423ab..2ee187f 100644 (file)
@@ -241,6 +241,7 @@ static const struct attribute_group ad7606_attribute_group = {
                .indexed = 1,                           \
                .channel = num,                         \
                .address = num,                         \
+               .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
                .scan_index = num,                      \
                .scan_type = IIO_ST('s', 16, 16, 0),    \
        }
index ec41d38..f4b738f 100644 (file)
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        /* - */
        {USB_DEVICE(0x20F4, 0x646B)},
        {USB_DEVICE(0x083A, 0xC512)},
+       {USB_DEVICE(0x25D4, 0x4CA1)},
+       {USB_DEVICE(0x25D4, 0x4CAB)},
 
 /* RTL8191SU */
        /* Realtek */
index 4e3d2c1..9b2e5c9 100644 (file)
@@ -335,6 +335,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
        int sg_cnt, i, resid;
        int err = 0;
        long timeleft;
+       struct scatterlist *sg_ptr;
        u32 val = TRIG_DMA;
 
        if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
@@ -371,7 +372,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
        sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
 
        resid = size;
-
+       sg_ptr = sg;
        chip->sgi = 0;
        /* Usually the next entry will be @sg@ + 1, but if this sg element
         * is part of a chained scatterlist, it could jump to the start of
@@ -379,14 +380,14 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
         * the proper sg
         */
        for (i = 0; i < *index; i++)
-               sg = sg_next(sg);
+               sg_ptr = sg_next(sg_ptr);
        for (i = *index; i < sg_cnt; i++) {
                dma_addr_t addr;
                unsigned int len;
                u8 option;
 
-               addr = sg_dma_address(sg);
-               len = sg_dma_len(sg);
+               addr = sg_dma_address(sg_ptr);
+               len = sg_dma_len(sg_ptr);
 
                RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
                             (unsigned int)addr, len);
@@ -415,7 +416,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
                if (!resid)
                        break;
 
-               sg = sg_next(sg);
+               sg_ptr = sg_next(sg_ptr);
        }
 
        RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
index 0842cc7..2ff1255 100644 (file)
@@ -427,19 +427,8 @@ int iscsit_reset_np_thread(
 
 int iscsit_del_np_comm(struct iscsi_np *np)
 {
-       if (!np->np_socket)
-               return 0;
-
-       /*
-        * Some network transports allocate their own struct sock->file,
-        * see  if we need to free any additional allocated resources.
-        */
-       if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
-               kfree(np->np_socket->file);
-               np->np_socket->file = NULL;
-       }
-
-       sock_release(np->np_socket);
+       if (np->np_socket)
+               sock_release(np->np_socket);
        return 0;
 }
 
@@ -4105,13 +4094,8 @@ int iscsit_close_connection(
        kfree(conn->conn_ops);
        conn->conn_ops = NULL;
 
-       if (conn->sock) {
-               if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
-                       kfree(conn->sock->file);
-                       conn->sock->file = NULL;
-               }
+       if (conn->sock)
                sock_release(conn->sock);
-       }
        conn->thread_set = NULL;
 
        pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
index 7da2d6a..0f68197 100644 (file)
@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
 /* Used for struct iscsi_np->np_flags */
 enum np_flags_table {
        NPF_IP_NETWORK          = 0x00,
-       NPF_SCTP_STRUCT_FILE    = 0x01 /* Bugfix */
 };
 
 /* Used for struct iscsi_np->np_thread_state */
@@ -511,7 +510,6 @@ struct iscsi_conn {
        u16                     local_port;
        int                     net_size;
        u32                     auth_id;
-#define CONNFLAG_SCTP_STRUCT_FILE                      0x01
        u32                     conn_flags;
        /* Used for iscsi_tx_login_rsp() */
        u32                     login_itt;
index bd2adec..2ec5339 100644 (file)
@@ -792,22 +792,6 @@ int iscsi_target_setup_login_socket(
                return ret;
        }
        np->np_socket = sock;
-       /*
-        * The SCTP stack needs struct socket->file.
-        */
-       if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
-           (np->np_network_transport == ISCSI_SCTP_UDP)) {
-               if (!sock->file) {
-                       sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
-                       if (!sock->file) {
-                               pr_err("Unable to allocate struct"
-                                               " file for SCTP\n");
-                               ret = -ENOMEM;
-                               goto fail;
-                       }
-                       np->np_flags |= NPF_SCTP_STRUCT_FILE;
-               }
-       }
        /*
         * Setup the np->np_sockaddr from the passed sockaddr setup
         * in iscsi_target_configfs.c code..
@@ -857,21 +841,15 @@ int iscsi_target_setup_login_socket(
 
 fail:
        np->np_socket = NULL;
-       if (sock) {
-               if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
-                       kfree(sock->file);
-                       sock->file = NULL;
-               }
-
+       if (sock)
                sock_release(sock);
-       }
        return ret;
 }
 
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
        u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
-       int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
+       int err, ret = 0, ip_proto, sock_type, stop;
        struct iscsi_conn *conn = NULL;
        struct iscsi_login *login;
        struct iscsi_portal_group *tpg = NULL;
@@ -882,7 +860,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        struct sockaddr_in6 sock_in6;
 
        flush_signals(current);
-       set_sctp_conn_flag = 0;
        sock = np->np_socket;
        ip_proto = np->np_ip_proto;
        sock_type = np->np_sock_type;
@@ -907,35 +884,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                spin_unlock_bh(&np->np_thread_lock);
                goto out;
        }
-       /*
-        * The SCTP stack needs struct socket->file.
-        */
-       if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
-           (np->np_network_transport == ISCSI_SCTP_UDP)) {
-               if (!new_sock->file) {
-                       new_sock->file = kzalloc(
-                                       sizeof(struct file), GFP_KERNEL);
-                       if (!new_sock->file) {
-                               pr_err("Unable to allocate struct"
-                                               " file for SCTP\n");
-                               sock_release(new_sock);
-                               /* Get another socket */
-                               return 1;
-                       }
-                       set_sctp_conn_flag = 1;
-               }
-       }
-
        iscsi_start_login_thread_timer(np);
 
        conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
        if (!conn) {
                pr_err("Could not allocate memory for"
                        " new connection\n");
-               if (set_sctp_conn_flag) {
-                       kfree(new_sock->file);
-                       new_sock->file = NULL;
-               }
                sock_release(new_sock);
                /* Get another socket */
                return 1;
@@ -945,9 +899,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        conn->conn_state = TARG_CONN_STATE_FREE;
        conn->sock = new_sock;
 
-       if (set_sctp_conn_flag)
-               conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
-
        pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
        conn->conn_state = TARG_CONN_STATE_XPT_UP;
 
@@ -1195,13 +1146,8 @@ old_sess_out:
                iscsi_release_param_list(conn->param_list);
                conn->param_list = NULL;
        }
-       if (conn->sock) {
-               if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
-                       kfree(conn->sock->file);
-                       conn->sock->file = NULL;
-               }
+       if (conn->sock)
                sock_release(conn->sock);
-       }
        kfree(conn);
 
        if (tpg) {
index 65ea65a..717a8d4 100644 (file)
@@ -1114,11 +1114,11 @@ int target_emulate_unmap(struct se_task *task)
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        unsigned char *buf, *ptr = NULL;
-       unsigned char *cdb = &cmd->t_task_cdb[0];
        sector_t lba;
-       unsigned int size = cmd->data_length, range;
-       int ret = 0, offset;
-       unsigned short dl, bd_dl;
+       int size = cmd->data_length;
+       u32 range;
+       int ret = 0;
+       int dl, bd_dl;
 
        if (!dev->transport->do_discard) {
                pr_err("UNMAP emulation not supported for: %s\n",
@@ -1127,24 +1127,41 @@ int target_emulate_unmap(struct se_task *task)
                return -ENOSYS;
        }
 
-       /* First UNMAP block descriptor starts at 8 byte offset */
-       offset = 8;
-       size -= 8;
-       dl = get_unaligned_be16(&cdb[0]);
-       bd_dl = get_unaligned_be16(&cdb[2]);
-
        buf = transport_kmap_data_sg(cmd);
 
-       ptr = &buf[offset];
-       pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+       dl = get_unaligned_be16(&buf[0]);
+       bd_dl = get_unaligned_be16(&buf[2]);
+
+       size = min(size - 8, bd_dl);
+       if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
+               goto err;
+       }
+
+       /* First UNMAP block descriptor starts at 8 byte offset */
+       ptr = &buf[8];
+       pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
                " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
 
-       while (size) {
+       while (size >= 16) {
                lba = get_unaligned_be64(&ptr[0]);
                range = get_unaligned_be32(&ptr[8]);
                pr_debug("UNMAP: Using lba: %llu and range: %u\n",
                                 (unsigned long long)lba, range);
 
+               if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               if (lba + range > dev->transport->get_blocks(dev) + 1) {
+                       cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
+                       ret = -EINVAL;
+                       goto err;
+               }
+
                ret = dev->transport->do_discard(dev, lba, range);
                if (ret < 0) {
                        pr_err("blkdev_issue_discard() failed: %d\n",
@@ -1199,7 +1216,7 @@ int target_emulate_write_same(struct se_task *task)
        if (num_blocks != 0)
                range = num_blocks;
        else
-               range = (dev->transport->get_blocks(dev) - lba);
+               range = (dev->transport->get_blocks(dev) - lba) + 1;
 
        pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
                 (unsigned long long)lba, (unsigned long long)range);
index b75bc92..9145141 100644 (file)
@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
        if (IS_ERR(file) || !file || !file->f_dentry) {
                pr_err("filp_open(%s) for APTPL metadata"
                        " failed\n", path);
-               return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+               return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
        }
 
        iov[0].iov_base = &buf[0];
@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
                cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = EINVAL;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         */
        if (!cmd->se_sess) {
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (cmd->data_length < 24) {
index 5660916..94c03d2 100644 (file)
@@ -1820,6 +1820,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
        case TCM_UNKNOWN_MODE_PAGE:
        case TCM_WRITE_PROTECTED:
+       case TCM_ADDRESS_OUT_OF_RANGE:
        case TCM_CHECK_CONDITION_ABORT_CMD:
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
        case TCM_CHECK_CONDITION_NOT_READY:
@@ -4496,6 +4497,15 @@ int transport_send_check_condition_and_sense(
                /* WRITE PROTECTED */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
                break;
+       case TCM_ADDRESS_OUT_OF_RANGE:
+               /* CURRENT ERROR */
+               buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+               /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+               buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+               break;
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
index d95cfe2..278819c 100644 (file)
@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
 {
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
 
+       if (cmd->aborted)
+               return ~0;
        return fc_seq_exch(cmd->seq)->rxid;
 }
 
index 3269213..64ddb63 100644 (file)
@@ -61,7 +61,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
        struct ft_tport *tport;
        int i;
 
-       tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+       tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+                                         lockdep_is_held(&ft_lport_lock));
        if (tport && tport->tpg)
                return tport;
 
index 19fb5fa..9aaed0d 100644 (file)
@@ -473,6 +473,8 @@ retry:
                        goto retry;
                }
                if (!desc->reslength) { /* zero length read */
+                       dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+                       clear_bit(WDM_READ, &desc->flags);
                        spin_unlock_irq(&desc->iuspin);
                        goto retry;
                }
index f6ff837..a9df218 100644 (file)
@@ -1555,10 +1555,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
        void __user *addr = as->userurb;
        unsigned int i;
 
-       if (as->userbuffer && urb->actual_length)
-               if (copy_to_user(as->userbuffer, urb->transfer_buffer,
-                                urb->actual_length))
+       if (as->userbuffer && urb->actual_length) {
+               if (urb->number_of_packets > 0)         /* Isochronous */
+                       i = urb->transfer_buffer_length;
+               else                                    /* Non-Isoc */
+                       i = urb->actual_length;
+               if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
                        return -EFAULT;
+       }
        if (put_user(as->status, &userurb->status))
                return -EFAULT;
        if (put_user(urb->actual_length, &userurb->actual_length))
index 52d27ed..175b6bb 100644 (file)
@@ -2039,12 +2039,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
 
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * A warm port reset is required to recover.
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
 {
        return hub_is_superspeed(hub->hdev) &&
-               (portstatus & USB_PORT_STAT_LINK_STATE) ==
-               USB_SS_PORT_LS_SS_INACTIVE;
+               (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_SS_INACTIVE) ||
+                ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                 USB_SS_PORT_LS_COMP_MOD)) ;
 }
 
 static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2080,7 +2084,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                         *
                         * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
                         */
-                       if (hub_port_inactive(hub, portstatus)) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                int ret;
 
                                if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -3646,9 +3650,7 @@ static void hub_events(void)
                        /* Warm reset a USB3 protocol port if it's in
                         * SS.Inactive state.
                         */
-                       if (hub_is_superspeed(hub->hdev) &&
-                               (portstatus & USB_PORT_STAT_LINK_STATE)
-                                       == USB_SS_PORT_LS_SS_INACTIVE) {
+                       if (hub_port_warm_reset_required(hub, portstatus)) {
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
                                hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
index 29c854b..4e1f0aa 100644 (file)
@@ -796,12 +796,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
 
        SET_ETHTOOL_OPS(net, &ops);
 
-       /* two kinds of host-initiated state changes:
-        *  - iff DATA transfer is active, carrier is "on"
-        *  - tx queueing enabled if open *and* carrier is "on"
-        */
-       netif_carrier_off(net);
-
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
        SET_NETDEV_DEVTYPE(net, &gadget_type);
@@ -815,6 +809,12 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
                the_dev = dev;
+
+               /* two kinds of host-initiated state changes:
+                *  - iff DATA transfer is active, carrier is "on"
+                *  - tx queueing enabled if open *and* carrier is "on"
+                */
+               netif_carrier_off(net);
        }
 
        return status;
index a8b2980..fd8a2c2 100644 (file)
@@ -438,6 +438,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
        }
 }
 
+/* Updates link status for a SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+       u32 pls = status_reg & PORT_PLS_MASK;
+
+       /* Resume state is an xHCI internal state.
+        * Do not report it to the USB core.
+        */
+       if (pls == XDEV_RESUME)
+               return;
+
+       /* When the CAS bit is set, a warm reset
+        * should be performed on the port.
+        */
+       if (status_reg & PORT_CAS) {
+               /* The CAS bit can be set while the port is
+                * in any link state.
+                * Only roothubs have CAS bit, so we
+                * pretend to be in compliance mode
+                * unless we're already in compliance
+                * or the inactive state.
+                */
+               if (pls != USB_SS_PORT_LS_COMP_MOD &&
+                   pls != USB_SS_PORT_LS_SS_INACTIVE) {
+                       pls = USB_SS_PORT_LS_COMP_MOD;
+               }
+               /* Also return the connection bit -
+                * the hub state machine resets the port
+                * when this bit is set.
+                */
+               pls |= USB_PORT_STAT_CONNECTION;
+       }
+       /* update status field */
+       *status |= pls;
+}
+
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                u16 wIndex, char *buf, u16 wLength)
 {
@@ -579,13 +615,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        else
                                status |= USB_PORT_STAT_POWER;
                }
-               /* Port Link State */
+               /* Update Port Link State for SuperSpeed ports */
                if (hcd->speed == HCD_USB3) {
-                       /* resume state is a xHCI internal state.
-                        * Do not report it to usb core.
-                        */
-                       if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
-                               status |= (temp & PORT_PLS_MASK);
+                       xhci_hub_report_link_state(&status, temp);
                }
                if (bus_state->port_c_suspend & (1 << wIndex))
                        status |= 1 << USB_PORT_FEAT_C_SUSPEND;
index 363b141..7a56805 100644 (file)
@@ -341,7 +341,11 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - the xHC can set this bit to report a device attached
+ * during an Sx state. A warm port reset should be performed to clear this bit
+ * and move the port to the connected state.
+ */
+#define PORT_CAS       (1 << 24)
 /* wake on connect (enable) */
 #define PORT_WKCONN_E  (1 << 25)
 /* wake on disconnect (enable) */
index aa0c43f..35e6b5f 100644 (file)
@@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+       { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -134,7 +135,13 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+       { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+       { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+       { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+       { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+       { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+       { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -146,7 +153,11 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+       { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+       { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+       { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
index 61d6c31..d89aac1 100644 (file)
@@ -235,6 +235,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_G1             0xA001
 #define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
 #define NOVATELWIRELESS_PRODUCT_G2             0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551          0xB001
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
@@ -495,6 +496,19 @@ static void option_instat_callback(struct urb *urb);
 
 /* MediaTek products */
 #define MEDIATEK_VENDOR_ID                     0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM               0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM               0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM             0x7101
+#define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_FP_1COM               0x0003
+#define MEDIATEK_PRODUCT_FP_2COM               0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM             0x0033
+
+/* Cellient products */
+#define CELLIENT_VENDOR_ID                     0x2692
+#define CELLIENT_PRODUCT_MEN200                        0x9005
 
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
@@ -548,6 +562,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
        .reserved = BIT(1),
 };
 
+static const struct option_blacklist_info net_intf2_blacklist = {
+       .reserved = BIT(2),
+};
+
 static const struct option_blacklist_info net_intf3_blacklist = {
        .reserved = BIT(3),
 };
@@ -730,6 +748,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+       /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+       { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -912,8 +932,12 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
@@ -1086,6 +1110,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
          0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1227,6 +1253,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) },        /* MediaTek MT6276M modem & app port */
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index c14c42b..ae66278 100644 (file)
@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
                if (work) {
                        __set_current_state(TASK_RUNNING);
                        work->fn(work);
+                       if (need_resched())
+                               schedule();
                } else
                        schedule();
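
The vhost_worker() hunk above yields between work items whenever the scheduler wants the CPU. The same idea in a simpler batch loop, using cond_resched() to fold the need_resched()/schedule() pair into one call; the item type and handler are hypothetical:

#include <linux/list.h>
#include <linux/sched.h>

/* Hypothetical work item and handler used only for this illustration. */
struct batch_item {
        struct list_head node;
};

static void handle_item(struct batch_item *item);

static void process_batch(struct list_head *items)
{
        struct batch_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, items, node) {
                list_del(&item->node);
                handle_item(item);
                cond_resched();         /* yield if another task is due to run */
        }
}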
 
index 0b39458..03321e5 100644 (file)
@@ -206,10 +206,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 
                work->ordered_func(work);
 
-               /* now take the lock again and call the freeing code */
+               /* now take the lock again and drop our item from the list */
                spin_lock(&workers->order_lock);
                list_del(&work->order_list);
+               spin_unlock(&workers->order_lock);
+
+               /*
+                * we don't want to call the ordered free functions
+                * with the lock held though
+                */
                work->ordered_free(work);
+               spin_lock(&workers->order_lock);
        }
 
        spin_unlock(&workers->order_lock);
index f44b392..6b2a724 100644 (file)
@@ -872,7 +872,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
 #ifdef CONFIG_MIGRATION
 static int btree_migratepage(struct address_space *mapping,
-                       struct page *newpage, struct page *page)
+                       struct page *newpage, struct page *page,
+                       enum migrate_mode mode)
 {
        /*
         * we can't safely write a btree page from here,
@@ -887,7 +888,7 @@ static int btree_migratepage(struct address_space *mapping,
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
-       return migrate_page(mapping, newpage, page);
+       return migrate_page(mapping, newpage, page, mode);
 }
 #endif
 
index 3568374..19b127c 100644 (file)
@@ -692,6 +692,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        kfree(name);
 
        iput(inode);
+
+       btrfs_run_delayed_items(trans, root);
        return ret;
 }
 
@@ -897,6 +899,7 @@ again:
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
+                               btrfs_run_delayed_items(trans, root);
                        }
                        kfree(victim_name);
                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1477,6 +1480,9 @@ again:
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        BUG_ON(ret);
+
+                       btrfs_run_delayed_items(trans, root);
+
                        kfree(name);
                        iput(inode);
 
index c807931..4115eca 100644 (file)
@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
+       int ret;
+       struct buffer_head *bh;
+
        /* Size must be multiple of hard sectorsize */
        if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
                return NULL;
        }
 
-       for (;;) {
-               struct buffer_head * bh;
-               int ret;
+retry:
+       bh = __find_get_block(bdev, block, size);
+       if (bh)
+               return bh;
 
+       ret = grow_buffers(bdev, block, size);
+       if (ret == 0) {
+               free_more_memory();
+               goto retry;
+       } else if (ret > 0) {
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
-
-               ret = grow_buffers(bdev, block, size);
-               if (ret < 0)
-                       return NULL;
-               if (ret == 0)
-                       free_more_memory();
        }
+       return NULL;
 }
 
 /*
index 6aa7457..c858a29 100644 (file)
@@ -89,6 +89,32 @@ static struct {
 /* Forward declarations */
 static void cifs_readv_complete(struct work_struct *work);
 
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+       mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+       mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */
+
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
 static void mark_open_files_invalid(struct cifs_tcon *pTcon)
@@ -1540,6 +1566,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
        eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
        cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
 
+       cifs_kmap_lock();
        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
                if (remaining >= PAGE_CACHE_SIZE) {
                        /* enough data to fill the page */
@@ -1589,6 +1616,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                        page_cache_release(page);
                }
        }
+       cifs_kmap_unlock();
 
        /* issue the read if we have any iovecs left to fill */
        if (rdata->nr_iov > 1) {
@@ -2171,6 +2199,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
        iov[0].iov_base = smb;
 
        /* marshal up the pages into iov array */
+       cifs_kmap_lock();
        wdata->bytes = 0;
        for (i = 0; i < wdata->nr_pages; i++) {
                iov[i + 1].iov_len = min(inode->i_size -
@@ -2179,6 +2208,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
                iov[i + 1].iov_base = kmap(wdata->pages[i]);
                wdata->bytes += iov[i + 1].iov_len;
        }
+       cifs_kmap_unlock();
 
        cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
 
index 9e0675a..56c152d 100644 (file)
@@ -2925,6 +2925,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
 #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT   (LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT   (1<<24)
+#endif /* CONFIG_HIGHMEM */
+
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
@@ -2955,6 +2967,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
                wsize = min_t(unsigned int, wsize,
                                server->maxBuf - sizeof(WRITE_REQ) + 4);
 
+       /* limit to the amount that we can kmap at once */
+       wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_WSIZE */
        wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
 
@@ -2975,18 +2990,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
         * MS-CIFS indicates that servers are only limited by the client's
         * bufsize for reads, testing against win98se shows that it throws
         * INVALID_PARAMETER errors if you try to request too large a read.
+        * OS/2 just sends back short reads.
         *
-        * If the server advertises a MaxBufferSize of less than one page,
-        * assume that it also can't satisfy reads larger than that either.
-        *
-        * FIXME: Is there a better heuristic for this?
+        * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+        * it can't handle a read request larger than its MaxBufferSize either.
         */
        if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
                defsize = CIFS_DEFAULT_IOSIZE;
        else if (server->capabilities & CAP_LARGE_READ_X)
                defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
-       else if (server->maxBuf >= PAGE_CACHE_SIZE)
-               defsize = CIFSMaxBufSize;
        else
                defsize = server->maxBuf - sizeof(READ_RSP);
 
@@ -2999,6 +3011,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
        if (!(server->capabilities & CAP_LARGE_READ_X))
                rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
 
+       /* limit to the amount that we can kmap at once */
+       rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
        /* hard limit of CIFS_MAX_RSIZE */
        rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
 
index db4a138..4c37ed4 100644 (file)
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
 
        dentry = d_lookup(parent, name);
        if (dentry) {
-               /* FIXME: check for inode number changes? */
-               if (dentry->d_inode != NULL)
+               inode = dentry->d_inode;
+               /* update inode in place if i_ino didn't change */
+               if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+                       cifs_fattr_to_inode(inode, fattr);
                        return dentry;
+               }
                d_drop(dentry);
                dput(dentry);
        }
index 69f994a..0dbe58a 100644 (file)
@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
        if (!IS_ERR(*lower_file))
                goto out;
-       if (flags & O_RDONLY) {
+       if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
        }
index 0dc5a3d..de42310 100644 (file)
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
                goto out_unlock_daemon;
        }
        daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       file->private_data = daemon;
        atomic_inc(&ecryptfs_num_miscdev_opens);
 out_unlock_daemon:
        mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
 
        mutex_lock(&ecryptfs_daemon_hash_mux);
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon)
+               daemon = file->private_data;
        mutex_lock(&daemon->mux);
-       BUG_ON(daemon->pid != task_pid(current));
        BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
        daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
        atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
                          struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
                          u16 msg_flags, struct ecryptfs_daemon *daemon)
 {
-       int rc = 0;
+       struct ecryptfs_message *msg;
 
-       mutex_lock(&msg_ctx->mux);
-       msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
-                              GFP_KERNEL);
-       if (!msg_ctx->msg) {
-               rc = -ENOMEM;
+       msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+       if (!msg) {
                printk(KERN_ERR "%s: Out of memory whilst attempting "
                       "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
-                      (sizeof(*msg_ctx->msg) + data_size));
-               goto out_unlock;
+                      (sizeof(*msg) + data_size));
+               return -ENOMEM;
        }
+
+       mutex_lock(&msg_ctx->mux);
+       msg_ctx->msg = msg;
        msg_ctx->msg->index = msg_ctx->index;
        msg_ctx->msg->data_len = data_size;
        msg_ctx->type = msg_type;
        memcpy(msg_ctx->msg->data, data, data_size);
        msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
-       mutex_lock(&daemon->mux);
        list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+       mutex_unlock(&msg_ctx->mux);
+
+       mutex_lock(&daemon->mux);
        daemon->num_queued_msg_ctx++;
        wake_up_interruptible(&daemon->wait);
        mutex_unlock(&daemon->mux);
-out_unlock:
-       mutex_unlock(&msg_ctx->mux);
-       return rc;
+
+       return 0;
 }
 
 /**
@@ -246,8 +251,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        mutex_lock(&ecryptfs_daemon_hash_mux);
        /* TODO: Just use file->private_data? */
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
-       BUG_ON(rc || !daemon);
+       if (rc || !daemon) {
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EINVAL;
+       }
        mutex_lock(&daemon->mux);
+       if (task_pid(current) != daemon->pid) {
+               mutex_unlock(&daemon->mux);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
+               return -EPERM;
+       }
        if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
                rc = 0;
                mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -284,9 +297,6 @@ check_list:
                 * message from the queue; try again */
                goto check_list;
        }
-       BUG_ON(euid != daemon->euid);
-       BUG_ON(current_user_ns() != daemon->user_ns);
-       BUG_ON(task_pid(current) != daemon->pid);
        msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
                                   struct ecryptfs_msg_ctx, daemon_out_list);
        BUG_ON(!msg_ctx);
index 4d9d3a4..a6f3763 100644 (file)
@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
        if (op == EPOLL_CTL_ADD) {
                if (is_file_epoll(tfile)) {
                        error = -ELOOP;
-                       if (ep_loop_check(ep, tfile) != 0)
+                       if (ep_loop_check(ep, tfile) != 0) {
+                               clear_tfile_check_list();
                                goto error_tgt_fput;
+                       }
                } else
                        list_add(&tfile->f_tfile_llink, &tfile_check_list);
        }
index 49cf230..24a49d4 100644 (file)
@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
 out:
        ios->numdevs = devs_in_group;
        ios->pages_consumed = cur_pg;
-       if (unlikely(ret)) {
-               if (length == ios->length)
-                       return ret;
-               else
-                       ios->length -= length;
-       }
-       return 0;
+       return ret;
 }
 
 int ore_create(struct ore_io_state *ios)
index d222c77..fff2070 100644 (file)
@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
  * ios->sp2d[p][*], xor is calculated the same way. These pages are
  * allocated/freed and don't go through cache
  */
-static int _read_4_write(struct ore_io_state *ios)
+static int _read_4_write_first_stripe(struct ore_io_state *ios)
 {
-       struct ore_io_state *ios_read;
        struct ore_striping_info read_si;
        struct __stripe_pages_2d *sp2d = ios->sp2d;
        u64 offset = ios->si.first_stripe_start;
-       u64 last_stripe_end;
-       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
-       unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
-       int ret;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
 
        if (offset == ios->offset) /* Go to start collect $200 */
                goto read_last_stripe;
@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
        min_p = _sp2d_min_pg(sp2d);
        max_p = _sp2d_max_pg(sp2d);
 
+       ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
+                  offset, ios->offset, min_p, max_p);
+
        for (c = 0; ; c++) {
                ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
                read_si.obj_offset += min_p * PAGE_SIZE;
@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
        }
 
 read_last_stripe:
+       return 0;
+}
+
+static int _read_4_write_last_stripe(struct ore_io_state *ios)
+{
+       struct ore_striping_info read_si;
+       struct __stripe_pages_2d *sp2d = ios->sp2d;
+       u64 offset;
+       u64 last_stripe_end;
+       unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+       unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
        offset = ios->offset + ios->length;
        if (offset % PAGE_SIZE)
                _add_to_r4w_last_page(ios, &offset);
@@ -527,15 +538,15 @@ read_last_stripe:
        c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
                       ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
 
-       BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
-       /* unaligned IO must be within a single stripe */
-
        if (min_p == sp2d->pages_in_unit) {
                /* Didn't do it yet */
                min_p = _sp2d_min_pg(sp2d);
                max_p = _sp2d_max_pg(sp2d);
        }
 
+       ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
+                  offset, last_stripe_end, min_p, max_p);
+
        while (offset < last_stripe_end) {
                struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
 
@@ -568,6 +579,15 @@ read_last_stripe:
        }
 
 read_it:
+       return 0;
+}
+
+static int _read_4_write_execute(struct ore_io_state *ios)
+{
+       struct ore_io_state *ios_read;
+       unsigned i;
+       int ret;
+
        ios_read = ios->ios_read_4_write;
        if (!ios_read)
                return 0;
@@ -591,6 +611,8 @@ read_it:
        }
 
        _mark_read4write_pages_uptodate(ios_read, ret);
+       ore_put_io_state(ios_read);
+       ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
        return 0;
 }
 
@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
                        /* If first stripe, Read in all read4write pages
                         * (if needed) before we calculate the first parity.
                         */
-                       _read_4_write(ios);
+                       _read_4_write_first_stripe(ios);
                }
+               if (!cur_len) /* If last stripe r4w pages of last stripe */
+                       _read_4_write_last_stripe(ios);
+               _read_4_write_execute(ios);
 
                for (i = 0; i < num_pages; i++) {
                        pages[i] = _raid_page_alloc();
@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
 
 int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
 {
-       struct ore_layout *layout = ios->layout;
-
        if (ios->parity_pages) {
+               struct ore_layout *layout = ios->layout;
                unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
-               unsigned stripe_size = ios->si.bytes_in_stripe;
-               u64 last_stripe, first_stripe;
 
                if (_sp2d_alloc(pages_in_unit, layout->group_width,
                                layout->parity, &ios->sp2d)) {
                        return -ENOMEM;
                }
-
-               /* Round io down to last full strip */
-               first_stripe = div_u64(ios->offset, stripe_size);
-               last_stripe = div_u64(ios->offset + ios->length, stripe_size);
-
-               /* If an IO spans more then a single stripe it must end at
-                * a stripe boundary. The reminder at the end is pushed into the
-                * next IO.
-                */
-               if (last_stripe != first_stripe) {
-                       ios->length = last_stripe * stripe_size - ios->offset;
-
-                       BUG_ON(!ios->length);
-                       ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
-                                       PAGE_SIZE;
-                       ios->si.length = ios->length; /*make it consistent */
-               }
        }
        return 0;
 }
index 914bf9e..d6970f7 100644 (file)
@@ -557,7 +557,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
                if (bitmap_bh == NULL)
                        continue;
 
-               x = ext4_count_free(bitmap_bh, sb->s_blocksize);
+               x = ext4_count_free(bitmap_bh->b_data,
+                                   EXT4_BLOCKS_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
                        i, ext4_free_group_clusters(sb, gdp), x);
                bitmap_count += x;
index fa3af81..bbde5d5 100644 (file)
 #include <linux/jbd2.h>
 #include "ext4.h"
 
-#ifdef EXT4FS_DEBUG
-
 static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
 
-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
+unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
 {
        unsigned int i, sum = 0;
 
-       if (!map)
-               return 0;
        for (i = 0; i < numchars; i++)
-               sum += nibblemap[map->b_data[i] & 0xf] +
-                       nibblemap[(map->b_data[i] >> 4) & 0xf];
+               sum += nibblemap[bitmap[i] & 0xf] +
+                       nibblemap[(bitmap[i] >> 4) & 0xf];
        return sum;
 }
 
-#endif  /*  EXT4FS_DEBUG  */
-
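
The nibblemap table used by ext4_count_free() above gives, for each possible 4-bit value, the number of zero bits it contains, so free entries (zero bits in the on-disk bitmap) are tallied two nibbles per byte. A small standalone sketch of the same counting over a toy bitmap follows; the sample data is invented for illustration and is not part of this patch.

#include <stdio.h>

/* Zero-bit count for each possible nibble value 0..15. */
static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};

static unsigned int count_free(const unsigned char *bitmap, unsigned int numchars)
{
	unsigned int i, sum = 0;

	for (i = 0; i < numchars; i++)
		sum += nibblemap[bitmap[i] & 0xf] +
		       nibblemap[(bitmap[i] >> 4) & 0xf];
	return sum;
}

int main(void)
{
	/* Toy bitmap: 0xF0 has 4 free (zero) bits, 0xFF none, 0x00 all 8. */
	unsigned char bitmap[] = { 0xF0, 0xFF, 0x00 };

	printf("free = %u\n", count_free(bitmap, sizeof(bitmap)));
	return 0;
}
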
index 7b1cd5c..8cb184c 100644 (file)
@@ -1123,8 +1123,7 @@ struct ext4_sb_info {
        unsigned long s_desc_per_block; /* Number of group descriptors per block */
        ext4_group_t s_groups_count;    /* Number of groups in the fs */
        ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
-       unsigned long s_overhead_last;  /* Last calculated overhead */
-       unsigned long s_blocks_last;    /* Last seen block count */
+       unsigned long s_overhead;  /* # of fs overhead clusters */
        unsigned int s_cluster_ratio;   /* Number of blocks per cluster */
        unsigned int s_cluster_bits;    /* log2 of s_cluster_ratio */
        loff_t s_bitmap_maxbytes;       /* max bytes for bitmap files */
@@ -1757,7 +1756,7 @@ struct mmpd_data {
 # define NORET_AND     noreturn,
 
 /* bitmap.c */
-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
 
 /* balloc.c */
 extern unsigned int ext4_block_group(struct super_block *sb,
@@ -1925,6 +1924,7 @@ extern int ext4_group_extend(struct super_block *sb,
                                ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern int ext4_calculate_overhead(struct super_block *sb);
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
index 8fb6844..6266799 100644 (file)
@@ -1057,7 +1057,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
                if (!bitmap_bh)
                        continue;
 
-               x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
+               x = ext4_count_free(bitmap_bh->b_data,
+                                   EXT4_INODES_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
                        (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
                bitmap_count += x;
index 3ce7613..8b01f9f 100644 (file)
@@ -277,6 +277,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
                used = ei->i_reserved_data_blocks;
        }
 
+       if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
+               ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
+                        "with only %d reserved metadata blocks\n", __func__,
+                        inode->i_ino, ei->i_allocated_meta_blocks,
+                        ei->i_reserved_meta_blocks);
+               WARN_ON(1);
+               ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
+       }
+
        /* Update per-inode reservations */
        ei->i_reserved_data_blocks -= used;
        ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
@@ -1102,6 +1111,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
        int ret;
+       ext4_lblk_t save_last_lblock;
+       int save_len;
+
+       /*
+        * We will charge metadata quota at writeout time; this saves
+        * us from metadata over-estimation, though we may go over by
+        * a small amount in the end.  Here we just reserve for data.
+        */
+       ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+       if (ret)
+               return ret;
 
        /*
         * recalculate the amount of metadata blocks to reserve
@@ -1110,32 +1130,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
         */
 repeat:
        spin_lock(&ei->i_block_reservation_lock);
+       /*
+        * ext4_calc_metadata_amount() has side effects, which we have
+        * to be prepared to undo if we fail to claim space.
+        */
+       save_len = ei->i_da_metadata_calc_len;
+       save_last_lblock = ei->i_da_metadata_calc_last_lblock;
        md_needed = EXT4_NUM_B2C(sbi,
                                 ext4_calc_metadata_amount(inode, lblock));
        trace_ext4_da_reserve_space(inode, md_needed);
-       spin_unlock(&ei->i_block_reservation_lock);
 
-       /*
-        * We will charge metadata quota at writeout time; this saves
-        * us from metadata over-estimation, though we may go over by
-        * a small amount in the end.  Here we just reserve for data.
-        */
-       ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
-       if (ret)
-               return ret;
        /*
         * We do still charge estimated metadata to the sb though;
         * we cannot afford to run out of free blocks.
         */
        if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-               dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+               ei->i_da_metadata_calc_len = save_len;
+               ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+               spin_unlock(&ei->i_block_reservation_lock);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
+               dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                return -ENOSPC;
        }
-       spin_lock(&ei->i_block_reservation_lock);
        ei->i_reserved_data_blocks++;
        ei->i_reserved_meta_blocks += md_needed;
        spin_unlock(&ei->i_block_reservation_lock);
index 996780a..4eac337 100644 (file)
@@ -952,6 +952,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
                           &sbi->s_flex_groups[flex_group].free_inodes);
        }
 
+       /*
+        * Update the fs overhead information
+        */
+       ext4_calculate_overhead(sb);
+
        ext4_handle_dirty_super(handle, sb);
 
 exit_journal:
index ab7aa3f..a071348 100644 (file)
@@ -1097,7 +1097,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
        }
        if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
                seq_printf(seq, ",max_batch_time=%u",
-                          (unsigned) sbi->s_min_batch_time);
+                          (unsigned) sbi->s_max_batch_time);
        }
 
        /*
@@ -3083,6 +3083,114 @@ static void ext4_destroy_lazyinit_thread(void)
        kthread_stop(ext4_lazyinit_task);
 }
 
+/*
+ * Note: calculating the overhead so we can be compatible with
+ * historical BSD practice is quite difficult in the face of
+ * clusters/bigalloc.  This is because multiple metadata blocks from
+ * different block groups can end up in the same allocation cluster.
+ * Calculating the exact overhead in the face of clustered allocation
+ * requires either O(all block bitmaps) in memory or O(number of block
+ * groups**2) in time.  We will still calculate the superblock for
+ * older file systems --- and if we come across with a bigalloc file
+ * system with zero in s_overhead_clusters the estimate will be close to
+ * correct especially for very large cluster sizes --- but for newer
+ * file systems, it's better to calculate this figure once at mkfs
+ * time, and store it in the superblock.  If the superblock value is
+ * present (even for non-bigalloc file systems), we will use it.
+ */
+static int count_overhead(struct super_block *sb, ext4_group_t grp,
+                         char *buf)
+{
+       struct ext4_sb_info     *sbi = EXT4_SB(sb);
+       struct ext4_group_desc  *gdp;
+       ext4_fsblk_t            first_block, last_block, b;
+       ext4_group_t            i, ngroups = ext4_get_groups_count(sb);
+       int                     s, j, count = 0;
+
+       first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+               (grp * EXT4_BLOCKS_PER_GROUP(sb));
+       last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+       for (i = 0; i < ngroups; i++) {
+               gdp = ext4_get_group_desc(sb, i, NULL);
+               b = ext4_block_bitmap(sb, gdp);
+               if (b >= first_block && b <= last_block) {
+                       ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+                       count++;
+               }
+               b = ext4_inode_bitmap(sb, gdp);
+               if (b >= first_block && b <= last_block) {
+                       ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+                       count++;
+               }
+               b = ext4_inode_table(sb, gdp);
+               if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
+                       for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
+                               int c = EXT4_B2C(sbi, b - first_block);
+                               ext4_set_bit(c, buf);
+                               count++;
+                       }
+               if (i != grp)
+                       continue;
+               s = 0;
+               if (ext4_bg_has_super(sb, grp)) {
+                       ext4_set_bit(s++, buf);
+                       count++;
+               }
+               for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+                       ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+                       count++;
+               }
+       }
+       if (!count)
+               return 0;
+       return EXT4_CLUSTERS_PER_GROUP(sb) -
+               ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
+}
+
+/*
+ * Compute the overhead and stash it in sbi->s_overhead
+ */
+int ext4_calculate_overhead(struct super_block *sb)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+       ext4_fsblk_t overhead = 0;
+       char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+
+       if (!buf)
+               return -ENOMEM;
+       memset(buf, 0, PAGE_SIZE);
+
+       /*
+        * Compute the overhead (FS structures).  This is constant
+        * for a given filesystem unless the number of block groups
+        * changes so we cache the previous value until it does.
+        */
+
+       /*
+        * All of the blocks before first_data_block are overhead
+        */
+       overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+
+       /*
+        * Add the overhead found in each block group
+        */
+       for (i = 0; i < ngroups; i++) {
+               int blks;
+
+               blks = count_overhead(sb, i, buf);
+               overhead += blks;
+               if (blks)
+                       memset(buf, 0, PAGE_SIZE);
+               cond_resched();
+       }
+       sbi->s_overhead = overhead;
+       smp_wmb();
+       free_page((unsigned long) buf);
+       return 0;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
        char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -3694,6 +3802,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
 
 no_journal:
+       /*
+        * Get the # of file system overhead blocks from the
+        * superblock if present.
+        */
+       if (es->s_overhead_clusters)
+               sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+       else {
+               ret = ext4_calculate_overhead(sb);
+               if (ret)
+                       goto failed_mount_wq;
+       }
+
        /*
         * The maximum number of concurrent works can be high and
         * concurrency isn't really necessary.  Limit it to 1.
@@ -4568,67 +4688,21 @@ restore_opts:
        return err;
 }
 
-/*
- * Note: calculating the overhead so we can be compatible with
- * historical BSD practice is quite difficult in the face of
- * clusters/bigalloc.  This is because multiple metadata blocks from
- * different block group can end up in the same allocation cluster.
- * Calculating the exact overhead in the face of clustered allocation
- * requires either O(all block bitmaps) in memory or O(number of block
- * groups**2) in time.  We will still calculate the superblock for
- * older file systems --- and if we come across with a bigalloc file
- * system with zero in s_overhead_clusters the estimate will be close to
- * correct especially for very large cluster sizes --- but for newer
- * file systems, it's better to calculate this figure once at mkfs
- * time, and store it in the superblock.  If the superblock value is
- * present (even for non-bigalloc file systems), we will use it.
- */
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
-       struct ext4_group_desc *gdp;
+       ext4_fsblk_t overhead = 0;
        u64 fsid;
        s64 bfree;
 
-       if (test_opt(sb, MINIX_DF)) {
-               sbi->s_overhead_last = 0;
-       } else if (es->s_overhead_clusters) {
-               sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
-       } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
-               ext4_group_t i, ngroups = ext4_get_groups_count(sb);
-               ext4_fsblk_t overhead = 0;
-
-               /*
-                * Compute the overhead (FS structures).  This is constant
-                * for a given filesystem unless the number of block groups
-                * changes so we cache the previous value until it does.
-                */
-
-               /*
-                * All of the blocks before first_data_block are
-                * overhead
-                */
-               overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
-
-               /*
-                * Add the overhead found in each block group
-                */
-               for (i = 0; i < ngroups; i++) {
-                       gdp = ext4_get_group_desc(sb, i, NULL);
-                       overhead += ext4_num_overhead_clusters(sb, i, gdp);
-                       cond_resched();
-               }
-               sbi->s_overhead_last = overhead;
-               smp_wmb();
-               sbi->s_blocks_last = ext4_blocks_count(es);
-       }
+       if (!test_opt(sb, MINIX_DF))
+               overhead = sbi->s_overhead;
 
        buf->f_type = EXT4_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_blocks = (ext4_blocks_count(es) -
-                        EXT4_C2B(sbi, sbi->s_overhead_last));
+       buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
        bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
                percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
        /* prevent underflow in case that few free space is available */
index b1a524d..cf6f434 100644 (file)
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
 #include <linux/sched.h>
 #include <linux/pipe_fs_i.h>
 
-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
 {
        int cur = *cnt; 
 
@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
                if (signal_pending(current))
                        break;
        }
+       return cur == *cnt ? -ERESTARTSYS : 0;
 }
 
 static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                                 * seen a writer */
                                filp->f_version = pipe->w_counter;
                        } else {
-                               wait_for_partner(inode, &pipe->w_counter);
-                               if(signal_pending(current))
+                               if (wait_for_partner(inode, &pipe->w_counter))
                                        goto err_rd;
                        }
                }
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
                        wake_up_partner(inode);
 
                if (!pipe->readers) {
-                       wait_for_partner(inode, &pipe->r_counter);
-                       if (signal_pending(current))
+                       if (wait_for_partner(inode, &pipe->r_counter))
                                goto err_wr;
                }
                break;
index 2d0ca24..0aa424a 100644 (file)
@@ -569,7 +569,8 @@ static int hugetlbfs_set_page_dirty(struct page *page)
 }
 
 static int hugetlbfs_migrate_page(struct address_space *mapping,
-                               struct page *newpage, struct page *page)
+                               struct page *newpage, struct page *page,
+                               enum migrate_mode mode)
 {
        int rc;
 
@@ -592,9 +593,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
-               if (sbinfo->max_blocks >= 0) {
-                       buf->f_blocks = sbinfo->max_blocks;
-                       buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
+               if (sbinfo->spool) {
+                       long free_pages;
+
+                       spin_lock(&sbinfo->spool->lock);
+                       buf->f_blocks = sbinfo->spool->max_hpages;
+                       free_pages = sbinfo->spool->max_hpages
+                               - sbinfo->spool->used_hpages;
+                       buf->f_bavail = buf->f_bfree = free_pages;
+                       spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
@@ -610,6 +617,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
 
        if (sbi) {
                sb->s_fs_info = NULL;
+
+               if (sbi->spool)
+                       hugepage_put_subpool(sbi->spool);
+
                kfree(sbi);
        }
 }
@@ -841,10 +852,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
-       sbinfo->max_blocks = config.nr_blocks;
-       sbinfo->free_blocks = config.nr_blocks;
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
+       sbinfo->spool = NULL;
+       if (config.nr_blocks != -1) {
+               sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
+               if (!sbinfo->spool)
+                       goto out_free;
+       }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
@@ -864,38 +879,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_root = root;
        return 0;
 out_free:
+       if (sbinfo->spool)
+               kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
 }
 
-int hugetlb_get_quota(struct address_space *mapping, long delta)
-{
-       int ret = 0;
-       struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
-
-       if (sbinfo->free_blocks > -1) {
-               spin_lock(&sbinfo->stat_lock);
-               if (sbinfo->free_blocks - delta >= 0)
-                       sbinfo->free_blocks -= delta;
-               else
-                       ret = -ENOMEM;
-               spin_unlock(&sbinfo->stat_lock);
-       }
-
-       return ret;
-}
-
-void hugetlb_put_quota(struct address_space *mapping, long delta)
-{
-       struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
-
-       if (sbinfo->free_blocks > -1) {
-               spin_lock(&sbinfo->stat_lock);
-               sbinfo->free_blocks += delta;
-               spin_unlock(&sbinfo->stat_lock);
-       }
-}
-
 static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
index 0d68f1f..fcc50ab 100644 (file)
@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
        return 0;
 }
 
-static int assign_type(struct file_lock *fl, int type)
+static int assign_type(struct file_lock *fl, long type)
 {
        switch (type) {
        case F_RDLCK:
@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
 /*
  * Initialize a lease, use the default lock manager operations
  */
-static int lease_init(struct file *filp, int type, struct file_lock *fl)
+static int lease_init(struct file *filp, long type, struct file_lock *fl)
  {
        if (assign_type(fl, type) != 0)
                return -EINVAL;
@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
 }
 
 /* Allocate a file_lock initialised to this type of lease */
-static struct file_lock *lease_alloc(struct file *filp, int type)
+static struct file_lock *lease_alloc(struct file *filp, long type)
 {
        struct file_lock *fl = locks_alloc_lock();
        int error = -ENOMEM;
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        case F_WRLCK:
                return generic_add_lease(filp, arg, flp);
        default:
-               BUG();
+               return -EINVAL;
        }
 }
 EXPORT_SYMBOL(generic_setlease);
index 47d1c6f..b122af8 100644 (file)
@@ -318,12 +318,12 @@ struct idmap_hashent {
        unsigned long           ih_expires;
        __u32                   ih_id;
        size_t                  ih_namelen;
-       char                    ih_name[IDMAP_NAMESZ];
+       const char              *ih_name;
 };
 
 struct idmap_hashtable {
        __u8                    h_type;
-       struct idmap_hashent    h_entries[IDMAP_HASH_SZ];
+       struct idmap_hashent    *h_entries;
 };
 
 struct idmap {
@@ -378,6 +378,28 @@ nfs_idmap_new(struct nfs_client *clp)
        return 0;
 }
 
+static void
+idmap_alloc_hashtable(struct idmap_hashtable *h)
+{
+       if (h->h_entries != NULL)
+               return;
+       h->h_entries = kcalloc(IDMAP_HASH_SZ,
+                       sizeof(*h->h_entries),
+                       GFP_KERNEL);
+}
+
+static void
+idmap_free_hashtable(struct idmap_hashtable *h)
+{
+       int i;
+
+       if (h->h_entries == NULL)
+               return;
+       for (i = 0; i < IDMAP_HASH_SZ; i++)
+               kfree(h->h_entries[i].ih_name);
+       kfree(h->h_entries);
+}
+
 void
 nfs_idmap_delete(struct nfs_client *clp)
 {
@@ -387,6 +409,8 @@ nfs_idmap_delete(struct nfs_client *clp)
                return;
        rpc_unlink(idmap->idmap_dentry);
        clp->cl_idmap = NULL;
+       idmap_free_hashtable(&idmap->idmap_user_hash);
+       idmap_free_hashtable(&idmap->idmap_group_hash);
        kfree(idmap);
 }
 
@@ -396,6 +420,8 @@ nfs_idmap_delete(struct nfs_client *clp)
 static inline struct idmap_hashent *
 idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len)
 {
+       if (h->h_entries == NULL)
+               return NULL;
        return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
 }
 
@@ -404,6 +430,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
 {
        struct idmap_hashent *he = idmap_name_hash(h, name, len);
 
+       if (he == NULL)
+               return NULL;
        if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
                return NULL;
        if (time_after(jiffies, he->ih_expires))
@@ -414,6 +442,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
 static inline struct idmap_hashent *
 idmap_id_hash(struct idmap_hashtable* h, __u32 id)
 {
+       if (h->h_entries == NULL)
+               return NULL;
        return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
 }
 
@@ -421,6 +451,9 @@ static struct idmap_hashent *
 idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
 {
        struct idmap_hashent *he = idmap_id_hash(h, id);
+
+       if (he == NULL)
+               return NULL;
        if (he->ih_id != id || he->ih_namelen == 0)
                return NULL;
        if (time_after(jiffies, he->ih_expires))
@@ -436,12 +469,14 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
 static inline struct idmap_hashent *
 idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
 {
+       idmap_alloc_hashtable(h);
        return idmap_name_hash(h, name, len);
 }
 
 static inline struct idmap_hashent *
 idmap_alloc_id(struct idmap_hashtable *h, __u32 id)
 {
+       idmap_alloc_hashtable(h);
        return idmap_id_hash(h, id);
 }
 
@@ -449,9 +484,14 @@ static void
 idmap_update_entry(struct idmap_hashent *he, const char *name,
                size_t namelen, __u32 id)
 {
+       char *str = kmalloc(namelen + 1, GFP_KERNEL);
+       if (str == NULL)
+               return;
+       kfree(he->ih_name);
        he->ih_id = id;
-       memcpy(he->ih_name, name, namelen);
-       he->ih_name[namelen] = '\0';
+       memcpy(str, name, namelen);
+       str[namelen] = '\0';
+       he->ih_name = str;
        he->ih_namelen = namelen;
        he->ih_expires = jiffies + nfs_idmap_cache_timeout;
 }
index 3f4d957..68b3f20 100644 (file)
@@ -330,7 +330,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data);
 
 #ifdef CONFIG_MIGRATION
 extern int nfs_migrate_page(struct address_space *,
-               struct page *, struct page *);
+               struct page *, struct page *, enum migrate_mode);
 #else
 #define nfs_migrate_page NULL
 #endif
index 66020ac..07354b7 100644 (file)
@@ -1186,8 +1186,9 @@ restart:
                                spin_lock(&state->state_lock);
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
-                                               printk("%s: Lock reclaim failed!\n",
-                                                       __func__);
+                                               pr_warn_ratelimited("NFS: "
+                                                       "%s: Lock reclaim "
+                                                       "failed!\n", __func__);
                                }
                                spin_unlock(&state->state_lock);
                                nfs4_put_open_state(state);
index 55d0128..a03ee52 100644 (file)
@@ -433,7 +433,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
        objios->ios->done = _read_done;
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                rdata->args.offset, rdata->args.count);
-       return ore_read(objios->ios);
+       ret = ore_read(objios->ios);
+       if (unlikely(ret))
+               objio_free_result(&objios->oir);
+       return ret;
 }
 
 /*
@@ -464,8 +467,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
        struct objio_state *objios = priv;
        struct nfs_write_data *wdata = objios->oir.rpcdata;
        pgoff_t index = offset / PAGE_SIZE;
-       struct page *page = find_get_page(wdata->inode->i_mapping, index);
+       struct page *page;
+       loff_t i_size = i_size_read(wdata->inode);
+
+       if (offset >= i_size) {
+               *uptodate = true;
+               dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
+               return ZERO_PAGE(0);
+       }
 
+       page = find_get_page(wdata->inode->i_mapping, index);
        if (!page) {
                page = find_or_create_page(wdata->inode->i_mapping,
                                                index, GFP_NOFS);
@@ -486,8 +497,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
 
 static void __r4w_put_page(void *priv, struct page *page)
 {
-       dprintk("%s: index=0x%lx\n", __func__, page->index);
-       page_cache_release(page);
+       dprintk("%s: index=0x%lx\n", __func__,
+               (page == ZERO_PAGE(0)) ? -1UL : page->index);
+       if (ZERO_PAGE(0) != page)
+               page_cache_release(page);
        return;
 }
 
@@ -517,8 +530,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
        dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
                wdata->args.offset, wdata->args.count);
        ret = ore_write(objios->ios);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               objio_free_result(&objios->oir);
                return ret;
+       }
 
        if (objios->sync)
                _write_done(objios->ios, objios);
index 4efd421..c6e523a 100644 (file)
@@ -1711,7 +1711,7 @@ out_error:
 
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
-               struct page *page)
+               struct page *page, enum migrate_mode mode)
 {
        /*
         * If PagePrivate is set, then the page is currently associated with
@@ -1726,7 +1726,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 
        nfs_fscache_release_page(page, GFP_KERNEL);
 
-       return migrate_page(mapping, newpage, page);
+       return migrate_page(mapping, newpage, page, mode);
 }
 #endif
 
index 08a07a2..57ceaf3 100644 (file)
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
        while (!list_empty(head)) {
                ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index bb24ab6..6f24e67 100644 (file)
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
                        continue;
                list_del_init(&ii->i_dirty);
+               truncate_inode_pages(&ii->vfs_inode.i_data, 0);
+               nilfs_btnode_cache_clear(&ii->i_btnode_cache);
                iput(&ii->vfs_inode);
        }
 }
index 6e39668..1c7d45e 100644 (file)
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
-       if (file->f_flags & O_SYNC)
+       if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;
 
        ocfs2_commit_trans(osb, handle);
@@ -2422,8 +2422,10 @@ out_dio:
                unaligned_dio = 0;
        }
 
-       if (unaligned_dio)
+       if (unaligned_dio) {
+               ocfs2_iocb_clear_unaligned_aio(iocb);
                atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+       }
 
 out:
        if (rw_level != -1)
index 0434ee9..76e2012 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
 {
        struct file *file;
        struct inode *inode;
-       int error;
+       int error, fput_needed;
 
        error = -EBADF;
-       file = fget(fd);
+       file = fget_raw_light(fd, &fput_needed);
        if (!file)
                goto out;
 
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
        if (!error)
                set_fs_pwd(current->fs, &file->f_path);
 out_putf:
-       fput(file);
+       fput_light(file, fput_needed);
 out:
        return error;
 }
index fbb0b47..d5378d0 100644 (file)
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 
                /* prevent the page from being discarded on memory pressure */
                SetPageDirty(page);
+               SetPageUptodate(page);
 
                unlock_page(page);
                put_page(page);
index 842ae22..512be8b 100644 (file)
@@ -274,13 +274,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
  * Check if we need to grow the arrays holding pages and partial page
  * descriptions.
  */
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+       spd->nr_pages_max = buffers;
+       if (buffers <= PIPE_DEF_BUFFERS)
                return 0;
 
-       spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
-       spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+       spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+       spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
 
        if (spd->pages && spd->partial)
                return 0;
@@ -290,10 +293,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
        return -ENOMEM;
 }
 
-void splice_shrink_spd(struct pipe_inode_info *pipe,
-                      struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
 {
-       if (pipe->buffers <= PIPE_DEF_BUFFERS)
+       if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
                return;
 
        kfree(spd->pages);
@@ -316,6 +318,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -327,7 +330,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        /*
         * Lookup the (hopefully) full range of pages we need.
@@ -498,7 +501,7 @@ fill_it:
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return error;
 }
 
@@ -599,6 +602,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &default_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -609,8 +613,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 
        res = -ENOMEM;
        vec = __vec;
-       if (pipe->buffers > PIPE_DEF_BUFFERS) {
-               vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+       if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+               vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
                if (!vec)
                        goto shrink_ret;
        }
@@ -618,7 +622,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
        offset = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+       for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
                struct page *page;
 
                page = alloc_page(GFP_USER);
@@ -666,7 +670,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
 shrink_ret:
        if (vec != __vec)
                kfree(vec);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return res;
 
 err:
@@ -1618,6 +1622,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1633,13 +1638,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 
        spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
                                            spd.partial, flags & SPLICE_F_GIFT,
-                                           pipe->buffers);
+                                           spd.nr_pages_max);
        if (spd.nr_pages <= 0)
                ret = spd.nr_pages;
        else
                ret = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
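
The fs/splice.c changes above all follow one rule: the number of pipe buffers is sampled once into spd.nr_pages_max, and every later allocation, loop bound and cleanup uses that snapshot instead of re-reading pipe->buffers, which can change under a concurrent F_SETPIPE_SZ resize. A minimal sketch of how a splice actor is expected to pair the helpers after this change (example_splice_actor is an illustrative name, not part of the patch):

/*
 * Hedged sketch: size the on-stack arrays for the common case, let
 * splice_grow_spd() replace them when the snapshotted nr_pages_max is
 * larger, and always tear down via splice_shrink_spd(&spd).
 */
static ssize_t example_splice_actor(struct pipe_inode_info *pipe,
				    unsigned int flags)
{
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};
	ssize_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* ... fill spd.pages[0 .. spd.nr_pages_max) and set spd.nr_pages ... */

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
	return ret;
}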
 
index ef3d1ba..15e2fc5 100644 (file)
@@ -718,8 +718,12 @@ static int fixup_free_space(struct ubifs_info *c)
                lnum = ubifs_next_log_lnum(c, lnum);
        }
 
-       /* Fixup the current log head */
-       err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
+       /*
+        * Fixup the log head, which contains only a CS node at the
+        * beginning.
+        */
+       err = fixup_leb(c, c->lhead_lnum,
+                       ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
        if (err)
                goto out;
 
index 87cb24a..516b7f0 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/seq_file.h>
 #include <linux/bitmap.h>
 #include <linux/crc-itu-t.h>
+#include <linux/log2.h>
 #include <asm/byteorder.h>
 
 #include "udf_sb.h"
@@ -1217,16 +1218,65 @@ out_bh:
        return ret;
 }
 
+static int udf_load_sparable_map(struct super_block *sb,
+                                struct udf_part_map *map,
+                                struct sparablePartitionMap *spm)
+{
+       uint32_t loc;
+       uint16_t ident;
+       struct sparingTable *st;
+       struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
+       int i;
+       struct buffer_head *bh;
+
+       map->s_partition_type = UDF_SPARABLE_MAP15;
+       sdata->s_packet_len = le16_to_cpu(spm->packetLength);
+       if (!is_power_of_2(sdata->s_packet_len)) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Invalid packet length %u\n",
+                       (unsigned)sdata->s_packet_len);
+               return -EIO;
+       }
+       if (spm->numSparingTables > 4) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Too many sparing tables (%d)\n",
+                       (int)spm->numSparingTables);
+               return -EIO;
+       }
+
+       for (i = 0; i < spm->numSparingTables; i++) {
+               loc = le32_to_cpu(spm->locSparingTable[i]);
+               bh = udf_read_tagged(sb, loc, loc, &ident);
+               if (!bh)
+                       continue;
+
+               st = (struct sparingTable *)bh->b_data;
+               if (ident != 0 ||
+                   strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
+                           strlen(UDF_ID_SPARING)) ||
+                   sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
+                                                       sb->s_blocksize) {
+                       brelse(bh);
+                       continue;
+               }
+
+               sdata->s_spar_map[i] = bh;
+       }
+       map->s_partition_func = udf_get_pblock_spar15;
+       return 0;
+}
+
 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                               struct kernel_lb_addr *fileset)
 {
        struct logicalVolDesc *lvd;
-       int i, j, offset;
+       int i, offset;
        uint8_t type;
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct genericPartitionMap *gpm;
        uint16_t ident;
        struct buffer_head *bh;
+       unsigned int table_len;
        int ret = 0;
 
        bh = udf_read_tagged(sb, block, block, &ident);
@@ -1234,15 +1284,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                return 1;
        BUG_ON(ident != TAG_IDENT_LVD);
        lvd = (struct logicalVolDesc *)bh->b_data;
-
-       i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
-       if (i != 0) {
-               ret = i;
+       table_len = le32_to_cpu(lvd->mapTableLength);
+       if (table_len > sb->s_blocksize - sizeof(*lvd)) {
+               udf_err(sb, "error loading logical volume descriptor: "
+                       "Partition table too long (%u > %lu)\n", table_len,
+                       sb->s_blocksize - sizeof(*lvd));
                goto out_bh;
        }
 
+       ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+       if (ret)
+               goto out_bh;
+
        for (i = 0, offset = 0;
-            i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
+            i < sbi->s_partitions && offset < table_len;
             i++, offset += gpm->partitionMapLength) {
                struct udf_part_map *map = &sbi->s_partmaps[i];
                gpm = (struct genericPartitionMap *)
@@ -1277,38 +1332,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_SPARABLE,
                                                strlen(UDF_ID_SPARABLE))) {
-                               uint32_t loc;
-                               struct sparingTable *st;
-                               struct sparablePartitionMap *spm =
-                                       (struct sparablePartitionMap *)gpm;
-
-                               map->s_partition_type = UDF_SPARABLE_MAP15;
-                               map->s_type_specific.s_sparing.s_packet_len =
-                                               le16_to_cpu(spm->packetLength);
-                               for (j = 0; j < spm->numSparingTables; j++) {
-                                       struct buffer_head *bh2;
-
-                                       loc = le32_to_cpu(
-                                               spm->locSparingTable[j]);
-                                       bh2 = udf_read_tagged(sb, loc, loc,
-                                                            &ident);
-                                       map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = bh2;
-
-                                       if (bh2 == NULL)
-                                               continue;
-
-                                       st = (struct sparingTable *)bh2->b_data;
-                                       if (ident != 0 || strncmp(
-                                               st->sparingIdent.ident,
-                                               UDF_ID_SPARING,
-                                               strlen(UDF_ID_SPARING))) {
-                                               brelse(bh2);
-                                               map->s_type_specific.s_sparing.
-                                                       s_spar_map[j] = NULL;
-                                       }
-                               }
-                               map->s_partition_func = udf_get_pblock_spar15;
+                               if (udf_load_sparable_map(sb, map,
+                                   (struct sparablePartitionMap *)gpm) < 0)
+                                       goto out_bh;
                        } else if (!strncmp(upm2->partIdent.ident,
                                                UDF_ID_METADATA,
                                                strlen(UDF_ID_METADATA))) {
index a03c098..bc00876 100644 (file)
@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
 #endif /* __HAVE_ARCH_PMD_WRITE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+       /*
+        * Depend on compiler for an atomic pmd read. NOTE: this is
+        * only going to work, if the pmdval_t isn't larger than
+        * an unsigned long.
+        */
+       return *pmdp;
+}
+#endif
+
 /*
  * This function is meant to be used by sites walking pagetables with
  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
@@ -458,14 +470,30 @@ static inline int pmd_write(pmd_t pmd)
  * undefined so behaving like if the pmd was none is safe (because it
  * can return none anyway). The compiler level barrier() is critically
  * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read, not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
  */
 static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
 {
-       /* depend on compiler for an atomic pmd read */
-       pmd_t pmdval = *pmd;
+       pmd_t pmdval = pmd_read_atomic(pmd);
        /*
         * The barrier will stabilize the pmdval in a register or on
         * the stack so that it will stop changing under the code.
+        *
+        * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+        * pmd_read_atomic is allowed to return a non-atomic pmdval
+        * (for example pointing to a hugepage that has never been
+        * mapped in the pmd). The checks below only care about
+        * the low part of the pmd with 32bit PAE x86 anyway, with the
+        * exception of pmd_none(). So the important thing is that if
+        * the low part of the pmd is found null, the high part will
+        * also be null or the pmd_none() check below would be
+        * confused.
         */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
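
The generic pmd_read_atomic() above is only safe while pmdval_t fits in one machine word; on 32-bit PAE the pmd is 64 bits wide, so an architecture is expected to override it. A hedged sketch in the spirit of the 32-bit PAE x86 override (not quoted verbatim from the patch): read the low half first, and only read the high half if the low half is non-null, so a racing pmd_populate() can at worst make the value look like pmd_none().

static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	/* low word first: if it is null we must not look at the high word */
	ret = (pmdval_t)*tmp;
	if (ret) {
		/* pairs with the store ordering done by the pmd writer */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}
	return (pmd_t) { ret };
}
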
index 5a20ffb..2f89d0e 100644 (file)
@@ -269,6 +269,7 @@ header-y += netfilter_ipv4.h
 header-y += netfilter_ipv6.h
 header-y += netlink.h
 header-y += netrom.h
+header-y += nfc.h
 header-y += nfs.h
 header-y += nfs2.h
 header-y += nfs3.h
index 2314ad8..b1a520e 100644 (file)
@@ -140,6 +140,7 @@ struct kiocb {
                (x)->ki_dtor = NULL;                    \
                (x)->ki_obj.tsk = tsk;                  \
                (x)->ki_user_data = 0;                  \
+               (x)->private = NULL;                    \
        } while (0)
 
 #define AIO_RING_MAGIC                 0xa10a10a1
index 0ed1eb0..ff039f0 100644 (file)
@@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q) \
        test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
index 6cb60fd..c692acc 100644 (file)
@@ -66,8 +66,9 @@ enum {
        /* migration should happen before other stuff but after perf */
        CPU_PRI_PERF            = 20,
        CPU_PRI_MIGRATION       = 10,
-       /* prepare workqueues for other notifiers */
-       CPU_PRI_WORKQUEUE       = 5,
+       /* bring up workqueues before normal notifiers and down after */
+       CPU_PRI_WORKQUEUE_UP    = 5,
+       CPU_PRI_WORKQUEUE_DOWN  = -5,
 };
 
 #define CPU_ONLINE             0x0002 /* CPU (unsigned)v is up */
index e9eaec5..7a7e5fd 100644 (file)
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * reading current mems_allowed and mempolicy in the fastpath must protected
- * by get_mems_allowed()
+ * get_mems_allowed is required when making decisions involving mems_allowed
+ * such as during page allocation. mems_allowed can be updated in parallel
+ * and, depending on the new value, an operation can fail, potentially causing
+ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
+ * prevents these artificial failures.
  */
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
-       current->mems_allowed_change_disable++;
-
-       /*
-        * ensure that reading mems_allowed and mempolicy happens after the
-        * update of ->mems_allowed_change_disable.
-        *
-        * the write-side task finds ->mems_allowed_change_disable is not 0,
-        * and knows the read-side task is reading mems_allowed or mempolicy,
-        * so it will clear old bits lazily.
-        */
-       smp_mb();
+       return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
-static inline void put_mems_allowed(void)
+/*
+ * If this returns false, the operation that took place after get_mems_allowed
+ * may have failed. It is up to the caller to retry the operation if
+ * appropriate.
+ */
+static inline bool put_mems_allowed(unsigned int seq)
 {
-       /*
-        * ensure that reading mems_allowed and mempolicy before reducing
-        * mems_allowed_change_disable.
-        *
-        * the write-side task will know that the read-side task is still
-        * reading mems_allowed or mempolicy, don't clears old bits in the
-        * nodemask.
-        */
-       smp_mb();
-       --ACCESS_ONCE(current->mems_allowed_change_disable);
+       return !read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
        task_lock(current);
+       write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
+       write_seqcount_end(&current->mems_allowed_seq);
        task_unlock(current);
 }
 
@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
+       return 0;
 }
 
-static inline void put_mems_allowed(void)
+static inline bool put_mems_allowed(unsigned int seq)
 {
+       return true;
 }
 
 #endif /* !CONFIG_CPUSETS */
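
The seqcount turns the old blocking handshake into an optimistic read: a reader samples the sequence, performs the allocation under the mask it saw, and retries only when the allocation failed and the mask changed underneath it. A minimal sketch of that retry loop, assuming a caller shaped like the page allocator (example_alloc_with_mems_allowed is an illustrative name):

static struct page *example_alloc_with_mems_allowed(gfp_t gfp, unsigned int order)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

retry:
	cpuset_mems_cookie = get_mems_allowed();

	/* attempt the allocation constrained by the sampled mems_allowed */
	page = alloc_pages(gfp, order);

	/*
	 * A failure is only final if mems_allowed did not change while we
	 * were using it; otherwise the failure may be an artifact of a
	 * concurrent update, so go around again.
	 */
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry;

	return page;
}
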
index 43d36b7..29b6353 100644 (file)
@@ -525,6 +525,7 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
+enum migrate_mode;
 
 struct iov_iter {
        const struct iovec *iov;
@@ -609,9 +610,12 @@ struct address_space_operations {
                        loff_t offset, unsigned long nr_segs);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int,
                                                void **, unsigned long *);
-       /* migrate the contents of a page to the specified target */
+       /*
+        * migrate the contents of a page to the specified target. If the
+        * mode is MIGRATE_ASYNC, it must not block.
+        */
        int (*migratepage) (struct address_space *,
-                       struct page *, struct page *);
+                       struct page *, struct page *, enum migrate_mode);
        int (*launder_page) (struct page *);
        int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
                                        unsigned long);
@@ -2586,7 +2590,8 @@ extern int generic_check_addressable(unsigned, u64);
 
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
-                               struct page *, struct page *);
+                               struct page *, struct page *,
+                               enum migrate_mode);
 #else
 #define buffer_migrate_page NULL
 #endif
index fd0dc30..cc07d27 100644 (file)
@@ -165,6 +165,7 @@ enum  hrtimer_base_type {
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
  * @active_bases:      Bitfield to mark bases with active timers
+ * @clock_was_set:     Indicates that the clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
  * @hres_active:       State of high resolution mode
@@ -177,7 +178,8 @@ enum  hrtimer_base_type {
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
-       unsigned long                   active_bases;
+       unsigned int                    active_bases;
+       unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
        int                             hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
 # define MONOTONIC_RES_NSEC    HIGH_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_HIGH_RES
 
+extern void clock_was_set_delayed(void);
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return 0;
 }
+
+static inline void clock_was_set_delayed(void) { }
+
 #endif
 
 extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_real(void);
 extern ktime_t ktime_get_boottime(void);
 extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
 
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 
index d9d6c86..c5ed2f1 100644 (file)
@@ -14,6 +14,15 @@ struct user_struct;
 #include <linux/shm.h>
 #include <asm/tlbflush.h>
 
+struct hugepage_subpool {
+       spinlock_t lock;
+       long count;
+       long max_hpages, used_hpages;
+};
+
+struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+void hugepage_put_subpool(struct hugepage_subpool *spool);
+
 int PageHuge(struct page *page);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
@@ -138,12 +147,11 @@ struct hugetlbfs_config {
 };
 
 struct hugetlbfs_sb_info {
-       long    max_blocks;   /* blocks allowed */
-       long    free_blocks;  /* blocks free */
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
+       struct hugepage_subpool *spool;
 };
 
 
@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
 extern const struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags);
-int hugetlb_get_quota(struct address_space *mapping, long delta);
-void hugetlb_put_quota(struct address_space *mapping, long delta);
 
 static inline int is_file_hugepages(struct file *file)
 {
index 32574ee..df53fdf 100644 (file)
@@ -30,6 +30,13 @@ extern struct fs_struct init_fs;
 #define INIT_THREADGROUP_FORK_LOCK(sig)
 #endif
 
+#ifdef CONFIG_CPUSETS
+#define INIT_CPUSET_SEQ                                                        \
+       .mems_allowed_seq = SEQCNT_ZERO,
+#else
+#define INIT_CPUSET_SEQ
+#endif
+
 #define INIT_SIGNALS(sig) {                                            \
        .nr_threads     = 1,                                            \
        .wait_chldexit  = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -193,6 +200,7 @@ extern struct cred init_cred;
        INIT_FTRACE_GRAPH                                               \
        INIT_TRACE_RECURSION                                            \
        INIT_TASK_RCU_PREEMPT(tsk)                                      \
+       INIT_CPUSET_SEQ                                                 \
 }
 
 
index e39aeec..eaf8674 100644 (file)
@@ -6,18 +6,31 @@
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *     on most operations but not ->writepage as the potential stall time
+ *     is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+       MIGRATE_ASYNC,
+       MIGRATE_SYNC_LIGHT,
+       MIGRATE_SYNC,
+};
+
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
-                       struct page *, struct page *);
+                       struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
                        unsigned long private, bool offlining,
-                       bool sync);
+                       enum migrate_mode mode);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
                        unsigned long private, bool offlining,
-                       bool sync);
+                       enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
                        struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
                unsigned long private, bool offlining,
-               bool sync) { return -ENOSYS; }
+               enum migrate_mode mode) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
                unsigned long private, bool offlining,
-               bool sync) { return -ENOSYS; }
+               enum migrate_mode mode) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
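
Callers now express how much blocking they can tolerate through migrate_mode instead of a bare bool. A hedged sketch of a caller choosing a mode (example_migrate and new_page_cb are illustrative names; real users such as compaction pick MIGRATE_ASYNC for opportunistic work and a sync mode when invoked directly):

static int example_migrate(struct list_head *pagelist, new_page_t new_page_cb,
			   bool may_block)
{
	enum migrate_mode mode = may_block ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
	int err;

	err = migrate_pages(pagelist, new_page_cb, 0 /* private */,
			    false /* offlining */, mode);
	if (err)
		/* whatever could not be migrated goes back on the LRU */
		putback_lru_pages(pagelist);
	return err;
}
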
index 188cb2f..25842b6 100644 (file)
@@ -173,6 +173,8 @@ static inline int is_unevictable_lru(enum lru_list l)
 #define ISOLATE_CLEAN          ((__force isolate_mode_t)0x4)
 /* Isolate unmapped file */
 #define ISOLATE_UNMAPPED       ((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE  ((__force isolate_mode_t)0x10)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
@@ -652,7 +654,7 @@ typedef struct pglist_data {
                                             range, including holes */
        int node_id;
        wait_queue_head_t kswapd_wait;
-       struct task_struct *kswapd;
+       struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
 } pg_data_t;
index c0cfa0d..7cda65b 100644 (file)
@@ -176,8 +176,6 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
        /* Provide indication device is assigned by a Virtual Machine Manager */
        PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
-       /* Device causes system crash if in D3 during S3 sleep */
-       PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
 };
 
 enum pci_irq_reroute_variant {
index 1c4f3e9..d336c35 100644 (file)
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
 
 
 extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -1481,7 +1482,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CPUSETS
        nodemask_t mems_allowed;        /* Protected by alloc_lock */
-       int mems_allowed_change_disable;
+       seqcount_t mems_allowed_seq;    /* Sequence number to catch updates */
        int cpuset_mem_spread_rotor;
        int cpuset_slab_spread_rotor;
 #endif
@@ -1892,6 +1893,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif
 
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
 #ifndef CONFIG_CPUMASK_OFFSTACK
 static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 {
index bdb4590..53dc7e7 100644 (file)
@@ -213,11 +213,8 @@ enum {
        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,
 
-       /* ensure the originating sk reference is available on driver level */
-       SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
-
        /* device driver supports TX zero-copy buffers */
-       SKBTX_DEV_ZEROCOPY = 1 << 4,
+       SKBTX_DEV_ZEROCOPY = 1 << 3,
 };
 
 /*
index 3ffef2f..1ac5727 100644 (file)
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
-       int nr_pages;                   /* number of pages in map */
+       int nr_pages;                   /* number of populated pages in map */
+       unsigned int nr_pages_max;      /* pages[] & partial[] arrays size */
        unsigned int flags;             /* splice flags */
        const struct pipe_buf_operations *ops;/* ops associated with output pipe */
        void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-                               struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
index aa60fe7..08e90fb 100644 (file)
@@ -266,7 +266,7 @@ static inline int ntp_synced(void)
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
 extern u64 tick_length;
 
-extern void second_overflow(void);
+extern int second_overflow(unsigned long secs);
 extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
 extern void hardpps(const struct timespec *, const struct timespec *);
index 9808877..a7a683e 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/netlabel.h>
 #include <net/request_sock.h>
 #include <linux/atomic.h>
+#include <asm/unaligned.h>
 
 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN          0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
 static inline int cipso_v4_validate(const struct sk_buff *skb,
                                    unsigned char **option)
 {
-       return -ENOSYS;
+       unsigned char *opt = *option;
+       unsigned char err_offset = 0;
+       u8 opt_len = opt[1];
+       u8 opt_iter;
+
+       if (opt_len < 8) {
+               err_offset = 1;
+               goto out;
+       }
+
+       if (get_unaligned_be32(&opt[2]) == 0) {
+               err_offset = 2;
+               goto out;
+       }
+
+       for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+                       err_offset = opt_iter + 1;
+                       goto out;
+               }
+               opt_iter += opt[opt_iter + 1];
+       }
+
+out:
+       *option = opt + err_offset;
+       return err_offset;
+
 }
 #endif /* CONFIG_NETLABEL */
 
index 55ce96b..9d7d54a 100644 (file)
@@ -220,13 +220,16 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
        unsigned int            pkt_len;
-       unsigned char           data[24];
+       u16                     bond_queue_mapping;
+       u16                     _pad;
+       unsigned char           data[20];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
        struct qdisc_skb_cb *qcb;
-       BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+
+       BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
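
The layout change above carves bond_queue_mapping out of qdisc_skb_cb and shrinks data[] accordingly, so the build-time check now measures against offsetof(struct qdisc_skb_cb, data) rather than sizeof(unsigned int). A hedged sketch of the overlay this check protects, in the style of existing qdiscs (example_skb_cb is an illustrative name):

struct example_skb_cb {
	u64 time_to_send;	/* per-qdisc private state kept in skb->cb */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	/* compile-time proof that the private area still fits in data[] */
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}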
 
index 6a308d4..1e100c6 100644 (file)
@@ -159,6 +159,8 @@ enum ata_command_set {
         ATAPI_COMMAND_SET = 1,
 };
 
+#define ATA_RESP_FIS_SIZE 24
+
 struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
@@ -170,7 +172,7 @@ struct sata_device {
 
        struct ata_port *ap;
        struct ata_host ata_host;
-       struct ata_taskfile tf;
+       u8     fis[ATA_RESP_FIS_SIZE];
        u32 sstatus;
        u32 serror;
        u32 scontrol;
@@ -486,7 +488,7 @@ enum exec_status {
  */
 struct ata_task_resp {
        u16  frame_len;
-       u8   ending_fis[24];      /* dev to host or data-in */
+       u8   ending_fis[ATA_RESP_FIS_SIZE];       /* dev to host or data-in */
        u32  sstatus;
        u32  serror;
        u32  scontrol;
index 94bbec3..6ee550e 100644 (file)
@@ -157,6 +157,7 @@ enum tcm_sense_reason_table {
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
        TCM_RESERVATION_CONFLICT                = 0x10,
+       TCM_ADDRESS_OUT_OF_RANGE                = 0x11,
 };
 
 struct se_obj {
index 0b1712d..46a1d3c 100644 (file)
@@ -964,7 +964,6 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 {
        bool need_loop;
 
-repeat:
        /*
         * Allow tasks that have access to memory reserves because they have
         * been OOM killed to get memory anywhere.
@@ -983,45 +982,19 @@ repeat:
         */
        need_loop = task_has_mempolicy(tsk) ||
                        !nodes_intersects(*newmems, tsk->mems_allowed);
-       nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
-       mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
-       /*
-        * ensure checking ->mems_allowed_change_disable after setting all new
-        * allowed nodes.
-        *
-        * the read-side task can see an nodemask with new allowed nodes and
-        * old allowed nodes. and if it allocates page when cpuset clears newly
-        * disallowed ones continuous, it can see the new allowed bits.
-        *
-        * And if setting all new allowed nodes is after the checking, setting
-        * all new allowed nodes and clearing newly disallowed ones will be done
-        * continuous, and the read-side task may find no node to alloc page.
-        */
-       smp_mb();
+       if (need_loop)
+               write_seqcount_begin(&tsk->mems_allowed_seq);
 
-       /*
-        * Allocation of memory is very fast, we needn't sleep when waiting
-        * for the read-side.
-        */
-       while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
-               task_unlock(tsk);
-               if (!task_curr(tsk))
-                       yield();
-               goto repeat;
-       }
-
-       /*
-        * ensure checking ->mems_allowed_change_disable before clearing all new
-        * disallowed nodes.
-        *
-        * if clearing newly disallowed bits before the checking, the read-side
-        * task may find no node to alloc page.
-        */
-       smp_mb();
+       nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+       mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
        tsk->mems_allowed = *newmems;
+
+       if (need_loop)
+               write_seqcount_end(&tsk->mems_allowed_seq);
+
        task_unlock(tsk);
 }
 
index 79ee71f..222457a 100644 (file)
@@ -979,6 +979,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 #ifdef CONFIG_CGROUPS
        init_rwsem(&sig->threadgroup_fork_lock);
 #endif
+#ifdef CONFIG_CPUSETS
+       seqcount_init(&tsk->mems_allowed_seq);
+#endif
 
        sig->oom_adj = current->signal->oom_adj;
        sig->oom_score_adj = current->signal->oom_score_adj;
index ae34bf5..6db7a5e 100644 (file)
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
        return 0;
 }
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+       return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
 /*
  * Retrigger next event is called after clock was set
  *
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-       struct timespec realtime_offset, xtim, wtm, sleep;
 
        if (!hrtimer_hres_active())
                return;
 
-       /* Optimized out for !HIGH_RES */
-       get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-       set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-       /* Adjust CLOCK_REALTIME offset */
        raw_spin_lock(&base->lock);
-       base->clock_base[HRTIMER_BASE_REALTIME].offset =
-               timespec_to_ktime(realtime_offset);
-       base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-               timespec_to_ktime(sleep);
-
+       hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
 }
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
                base->clock_base[i].resolution = KTIME_HIGH_RES;
 
        tick_setup_sched_timer();
-
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        return 1;
 }
 
+/*
+ * Called from timekeeping code to reprogram the hrtimer interrupt
+ * device. If called from the timer interrupt context we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       cpu_base->clock_was_set = 1;
+       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
-       entry_time = now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
 retry:
        expires_next.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
         * We need to prevent that we loop forever in the hrtimer
         * interrupt routine. We give it 3 attempts to avoid
         * overreacting on some spurious event.
+        *
+        * Acquire base lock for updating the offsets and retrieving
+        * the current time.
         */
-       now = ktime_get();
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
        cpu_base->nr_retries++;
        if (++retries < 3)
                goto retry;
@@ -1343,6 +1356,7 @@ retry:
         */
        cpu_base->nr_hangs++;
        cpu_base->hang_detected = 1;
+       raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
        if (delta.tv64 > cpu_base->max_hang_time.tv64)
                cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
 
 static void run_hrtimer_softirq(struct softirq_action *h)
 {
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+       if (cpu_base->clock_was_set) {
+               cpu_base->clock_was_set = 0;
+               clock_was_set();
+       }
+
        hrtimer_peek_ahead_timers();
 }
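
clock_was_set() cannot be called from hard interrupt context, so timekeeping code that discovers an offset change there (for instance when a leap second is folded in) sets the per-cpu flag via clock_was_set_delayed() and lets HRTIMER_SOFTIRQ do the real work. A hedged sketch of such a call site, using only the interfaces shown in this series (example_handle_leap is an illustrative name; the actual xtime adjustment is elided):

static void example_handle_leap(unsigned long secs)
{
	int leap = second_overflow(secs);	/* int return per the ntp.h change above */

	if (leap) {
		/* ... fold 'leap' into xtime / wall_to_monotonic here ... */
		clock_was_set_delayed();	/* defer base-offset update to softirq */
	}
}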
 
index 7c0d578..013bd2e 100644 (file)
@@ -367,6 +367,7 @@ int hibernation_snapshot(int platform_mode)
        }
 
        suspend_console();
+       ftrace_stop();
        pm_restrict_gfp_mask();
        error = dpm_suspend(PMSG_FREEZE);
        if (error)
@@ -392,6 +393,7 @@ int hibernation_snapshot(int platform_mode)
        if (error || !in_suspend)
                pm_restore_gfp_mask();
 
+       ftrace_start();
        resume_console();
        dpm_complete(msg);
 
@@ -496,6 +498,7 @@ int hibernation_restore(int platform_mode)
 
        pm_prepare_console();
        suspend_console();
+       ftrace_stop();
        pm_restrict_gfp_mask();
        error = dpm_suspend_start(PMSG_QUIESCE);
        if (!error) {
@@ -503,6 +506,7 @@ int hibernation_restore(int platform_mode)
                dpm_resume_end(PMSG_RECOVER);
        }
        pm_restore_gfp_mask();
+       ftrace_start();
        resume_console();
        pm_restore_console();
        return error;
@@ -529,6 +533,7 @@ int hibernation_platform_enter(void)
 
        entering_platform_hibernation = true;
        suspend_console();
+       ftrace_stop();
        error = dpm_suspend_start(PMSG_HIBERNATE);
        if (error) {
                if (hibernation_ops->recover)
@@ -572,6 +577,7 @@ int hibernation_platform_enter(void)
  Resume_devices:
        entering_platform_hibernation = false;
        dpm_resume_end(PMSG_RESTORE);
+       ftrace_start();
        resume_console();
 
  Close:
index 4953dc0..af48faa 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/export.h>
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
+#include <linux/ftrace.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -220,6 +221,7 @@ int suspend_devices_and_enter(suspend_state_t state)
                        goto Close;
        }
        suspend_console();
+       ftrace_stop();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
@@ -239,6 +241,7 @@ int suspend_devices_and_enter(suspend_state_t state)
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
+       ftrace_start();
        resume_console();
  Close:
        if (suspend_ops->end)
index b313086..64f8f97 100644 (file)
@@ -6,7 +6,7 @@
  *
  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
+ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  *
  * This file is released under the GPLv2.
  *
@@ -283,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
                return -ENOSPC;
 
        if (bio_chain) {
-               src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+               src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+                                             __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
                } else {
                        ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
                        if (ret)
                                return ret;
-                       src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+                       src = (void *)__get_free_page(__GFP_WAIT |
+                                                     __GFP_NOWARN |
+                                                     __GFP_NORETRY);
                        if (src) {
                                copy_page(src, buf);
                        } else {
@@ -368,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
                clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;
-       }
-       if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
-               error = hib_wait_on_bio_chain(bio_chain);
-               if (error)
-                       goto out;
-               handle->reqd_free_pages = reqd_free_pages();
+
+               if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+                       error = hib_wait_on_bio_chain(bio_chain);
+                       if (error)
+                               goto out;
+                       /*
+                        * Recalculate the number of required free pages, to
+                        * make sure we never take more than half.
+                        */
+                       handle->reqd_free_pages = reqd_free_pages();
+               }
        }
  out:
        return error;
@@ -420,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
 /* Maximum number of threads for compression/decompression. */
 #define LZO_THREADS    3
 
-/* Maximum number of pages for read buffering. */
-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
+/* Minimum/maximum number of pages for read buffering. */
+#define LZO_MIN_RD_PAGES       1024
+#define LZO_MAX_RD_PAGES       8192
 
 
 /**
@@ -631,12 +640,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
                }
        }
 
-       /*
-        * Adjust number of free pages after all allocations have been done.
-        * We don't want to run out of pages when writing.
-        */
-       handle->reqd_free_pages = reqd_free_pages();
-
        /*
         * Start the CRC32 thread.
         */
@@ -658,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
                goto out_clean;
        }
 
+       /*
+        * Adjust the number of required free pages after all allocations have
+        * been done. We don't want to run out of pages when writing.
+        */
+       handle->reqd_free_pages = reqd_free_pages();
+
        printk(KERN_INFO
                "PM: Using %u thread(s) for compression.\n"
                "PM: Compressing and saving image data (%u pages) ...     ",
@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
        unsigned i, thr, run_threads, nr_threads;
        unsigned ring = 0, pg = 0, ring_size = 0,
                 have = 0, want, need, asked = 0;
-       unsigned long read_pages;
+       unsigned long read_pages = 0;
        unsigned char **page = NULL;
        struct dec_data *data = NULL;
        struct crc_data *crc = NULL;
@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-       page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
+       page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
        }
 
        /*
-        * Adjust number of pages for read buffering, in case we are short.
+        * Set the number of pages for read buffering.
+        * This is complete guesswork, because we'll only know the real
+        * picture once prepare_image() is called, which is much later on
+        * during the image load phase. We'll assume the worst case and
+        * say that none of the image pages are from high memory.
         */
-       read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
-       read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
+       if (low_free_pages() > snapshot_get_image_size())
+               read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
+       read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
 
        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  __GFP_WAIT | __GFP_HIGH :
-                                                 __GFP_WAIT);
+                                                 __GFP_WAIT | __GFP_NOWARN |
+                                                 __GFP_NORETRY);
+
                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
index b6f803a..a535fc9 100644 (file)
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .nr_pages = 0,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .partial = partial,
                .flags = flags,
                .ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
                 ret += padding;
 
 out:
-       splice_shrink_spd(pipe, &spd);
-        return ret;
+       splice_shrink_spd(&spd);
+       return ret;
 }
 
 static ssize_t relay_file_splice_read(struct file *in,
index b25b9ed..b16c3ac 100644 (file)
@@ -1885,10 +1885,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 
 #endif
 
-static void calc_load_account_idle(struct rq *this_rq);
 static void update_sysctl(void);
 static int get_update_sysctl_factor(void);
-static void update_cpu_load(struct rq *this_rq);
+static void update_idle_cpu_load(struct rq *this_rq);
 
 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
@@ -3401,11 +3400,73 @@ unsigned long this_cpu_load(void)
 }
 
 
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns in the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    serious number of cpus, therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out -- true by definition, we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper-bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't lose the delta, just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
+ */
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
 unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
 
 static long calc_load_fold_active(struct rq *this_rq)
 {
@@ -3422,6 +3483,9 @@ static long calc_load_fold_active(struct rq *this_rq)
        return delta;
 }
 
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
@@ -3433,30 +3497,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 
 #ifdef CONFIG_NO_HZ
 /*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumulating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
  *
  * When making the ILB scale, we should try to pull this in as well.
  */
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
+
+static inline int calc_load_write_idx(void)
+{
+       int idx = calc_load_idx;
+
+       /*
+        * See calc_global_nohz(), if we observe the new index, we also
+        * need to observe the new update time.
+        */
+       smp_rmb();
+
+       /*
+        * If the folding window started, make sure we start writing in the
+        * next idle-delta.
+        */
+       if (!time_before(jiffies, calc_load_update))
+               idx++;
+
+       return idx & 1;
+}
 
-static void calc_load_account_idle(struct rq *this_rq)
+static inline int calc_load_read_idx(void)
 {
+       return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+       struct rq *this_rq = this_rq();
        long delta;
 
+       /*
+        * We're going into NOHZ mode, if there's any pending delta, fold it
+        * into the pending idle delta.
+        */
        delta = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks_idle);
+       if (delta) {
+               int idx = calc_load_write_idx();
+               atomic_long_add(delta, &calc_load_idle[idx]);
+       }
 }
 
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
 {
-       long delta = 0;
+       struct rq *this_rq = this_rq();
 
        /*
-        * Its got a race, we don't care...
+        * If we're still before the sample window, we're done.
         */
-       if (atomic_long_read(&calc_load_tasks_idle))
-               delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
+
+       /*
+        * We woke inside or after the sample window; this means we're already
+        * accounted through the nohz accounting, so skip the entire deal and
+        * sync up for the next window.
+        */
+       this_rq->calc_load_update = calc_load_update;
+       if (time_before(jiffies, this_rq->calc_load_update + 10))
+               this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+       int idx = calc_load_read_idx();
+       long delta = 0;
+
+       if (atomic_long_read(&calc_load_idle[idx]))
+               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
 
        return delta;
 }
@@ -3542,66 +3694,39 @@ static void calc_global_nohz(void)
 {
        long delta, active, n;
 
-       /*
-        * If we crossed a calc_load_update boundary, make sure to fold
-        * any pending idle changes, the respective CPUs might have
-        * missed the tick driven calc_load_account_active() update
-        * due to NO_HZ.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
+       if (!time_before(jiffies, calc_load_update + 10)) {
+               /*
+                * Catch-up, fold however many we are behind still
+                */
+               delta = jiffies - calc_load_update - 10;
+               n = 1 + (delta / LOAD_FREQ);
 
-       /*
-        * It could be the one fold was all it took, we done!
-        */
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
+               active = atomic_long_read(&calc_load_tasks);
+               active = active > 0 ? active * FIXED_1 : 0;
 
-       /*
-        * Catch-up, fold however many we are behind still
-        */
-       delta = jiffies - calc_load_update - 10;
-       n = 1 + (delta / LOAD_FREQ);
+               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
 
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
-
-       avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-       avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-       avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
-
-       calc_load_update += n * LOAD_FREQ;
-}
-#else
-static void calc_load_account_idle(struct rq *this_rq)
-{
-}
+               calc_load_update += n * LOAD_FREQ;
+       }
 
-static inline long calc_load_fold_idle(void)
-{
-       return 0;
+       /*
+        * Flip the idle index...
+        *
+        * Make sure we first write the new time then flip the index, so that
+        * calc_load_write_idx() will see the new time when it reads the new
+        * index, this avoids a double flip messing things up.
+        */
+       smp_wmb();
+       calc_load_idx++;
 }
+#else /* !CONFIG_NO_HZ */
 
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
 
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
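
A minimal userspace sketch of the fold used by calc_load_fold_idle() above: the pending idle delta is only exchanged to zero when a plain read shows there is something to take, so idle CPUs do not keep dirtying the cache line. Only the single-slot shape is shown; the two-slot index selection (calc_load_read_idx()/calc_load_idx) is left out, and the names below are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for one slot of calc_load_idle[]. */
static atomic_long pending_idle;

/* A CPU entering NO_HZ idle contributes the tasks it folded away. */
static void note_idle_delta(long delta)
{
        atomic_fetch_add(&pending_idle, delta);
}

/* The periodic folder: read first, only exchange when non-zero. */
static long fold_idle(void)
{
        long delta = 0;

        if (atomic_load(&pending_idle))
                delta = atomic_exchange(&pending_idle, 0);

        return delta;
}

int main(void)
{
        long first, second;

        note_idle_delta(3);
        note_idle_delta(-1);
        first = fold_idle();
        second = fold_idle();
        printf("folded %ld, then %ld\n", first, second); /* 2, then 0 */
        return 0;
}
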
 
 /*
  * calc_load - update the avenrun load estimates 10 ticks after the
@@ -3609,11 +3734,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  */
 void calc_global_load(unsigned long ticks)
 {
-       long active;
+       long active, delta;
 
        if (time_before(jiffies, calc_load_update + 10))
                return;
 
+       /*
+        * Fold the 'old' idle-delta to include all NO_HZ cpus.
+        */
+       delta = calc_load_fold_idle();
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;
 
@@ -3624,12 +3756,7 @@ void calc_global_load(unsigned long ticks)
        calc_load_update += LOAD_FREQ;
 
        /*
-        * Account one period with whatever state we found before
-        * folding in the nohz state and ageing the entire idle period.
-        *
-        * This avoids loosing a sample when we go idle between 
-        * calc_load_account_active() (10 ticks ago) and now and thus
-        * under-accounting.
+        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
         */
        calc_global_nohz();
 }
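
For reference, avenrun[] is kept in fixed point and decays toward the sampled active count once per LOAD_FREQ window (roughly every five seconds). A self-contained sketch of that update, assuming the conventional FSHIFT/EXP_* constants; treat the exact values as illustrative rather than quoted from this tree.

#include <stdio.h>

/* Fixed-point constants as conventionally used for the load average. */
#define FSHIFT   11
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed point */
#define EXP_1    1884                   /* 1/exp(5s/1min)     */
#define EXP_5    2014                   /* 1/exp(5s/5min)     */
#define EXP_15   2037                   /* 1/exp(5s/15min)    */

/* One exponential-decay step: new = old*exp + active*(1-exp). */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}

int main(void)
{
        unsigned long avenrun = 0;
        unsigned long active = 2 * FIXED_1;     /* two runnable tasks */

        /* Twelve ~5-second windows: one minute of constant load. */
        for (int i = 0; i < 12; i++)
                avenrun = calc_load(avenrun, EXP_1, active);

        printf("1-min load after 60s at 2 tasks: %lu.%02lu\n",
               avenrun >> FSHIFT, ((avenrun & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
}
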
@@ -3646,13 +3773,16 @@ static void calc_load_account_active(struct rq *this_rq)
                return;
 
        delta  = calc_load_fold_active(this_rq);
-       delta += calc_load_fold_idle();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
        this_rq->calc_load_update += LOAD_FREQ;
 }
 
+/*
+ * End of global load-average stuff
+ */
+
 /*
  * The exact cpuload at various idx values, calculated at every tick would be
  * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
@@ -3725,22 +3855,13 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * scheduler tick (TICK_NSEC). With tickless idle this will not be called
  * every tick. We fix it up based on jiffies.
  */
-static void update_cpu_load(struct rq *this_rq)
+static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+                             unsigned long pending_updates)
 {
-       unsigned long this_load = this_rq->load.weight;
-       unsigned long curr_jiffies = jiffies;
-       unsigned long pending_updates;
        int i, scale;
 
        this_rq->nr_load_updates++;
 
-       /* Avoid repeated calls on same jiffy, when moving in and out of idle */
-       if (curr_jiffies == this_rq->last_load_update_tick)
-               return;
-
-       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-       this_rq->last_load_update_tick = curr_jiffies;
-
        /* Update our load: */
        this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
        for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
@@ -3765,9 +3886,78 @@ static void update_cpu_load(struct rq *this_rq)
        sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_NO_HZ
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we cannot use the delta approach from the regular tick since that
+ * would seriously skew the load calculation. However we'll make do for those
+ * updates happening while idle (nohz_idle_balance) or coming out of idle
+ * (tick_nohz_idle_exit).
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
+/*
+ * Called from nohz_idle_balance() to update the load ratings before doing the
+ * idle balance.
+ */
+static void update_idle_cpu_load(struct rq *this_rq)
+{
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long load = this_rq->load.weight;
+       unsigned long pending_updates;
+
+       /*
+        * bail if there's load or we're actually up-to-date.
+        */
+       if (load || curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       this_rq->last_load_update_tick = curr_jiffies;
+
+       __update_cpu_load(this_rq, load, pending_updates);
+}
+
+/*
+ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ */
+void update_cpu_load_nohz(void)
+{
+       struct rq *this_rq = this_rq();
+       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
+       unsigned long pending_updates;
+
+       if (curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       raw_spin_lock(&this_rq->lock);
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       if (pending_updates) {
+               this_rq->last_load_update_tick = curr_jiffies;
+               /*
+                * We were idle, this means load 0, the current load might be
+                * !0 due to remote wakeups and the sort.
+                */
+               __update_cpu_load(this_rq, 0, pending_updates);
+       }
+       raw_spin_unlock(&this_rq->lock);
+}
+#endif /* CONFIG_NO_HZ */
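
When pending_updates is larger than one, the per-index cpu_load[] slots have to be decayed for the ticks that never happened; the kernel short-circuits this with precomputed degrade factors (see decay_load_missed() in the surrounding context), but for an idle period the naive equivalent is plain repeated geometric decay. A hedged sketch with made-up slot values:

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

/* Decay one cpu_load[] slot as if 'missed' idle ticks had occurred: each
 * tick keeps (2^idx - 1)/2^idx of the old value and adds nothing, because
 * the CPU was idle (this_load == 0). */
static unsigned long decay_idle_ticks(unsigned long load, int idx,
                                      unsigned long missed)
{
        unsigned long scale = 1UL << idx;

        while (missed--)
                load = (load * (scale - 1)) >> idx;

        return load;
}

int main(void)
{
        unsigned long cpu_load[CPU_LOAD_IDX_MAX] = { 0, 1024, 1024, 1024, 1024 };

        /* Pretend we slept 7 jiffies in NO_HZ idle before this update;
         * slot 0 just tracks the instantaneous (zero) load. */
        for (int i = 1; i < CPU_LOAD_IDX_MAX; i++)
                cpu_load[i] = decay_idle_ticks(cpu_load[i], i, 7);

        for (int i = 0; i < CPU_LOAD_IDX_MAX; i++)
                printf("cpu_load[%d] = %lu\n", i, cpu_load[i]);
        return 0;
}
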
+
+/*
+ * Called from scheduler_tick()
+ */
 static void update_cpu_load_active(struct rq *this_rq)
 {
-       update_cpu_load(this_rq);
+       /*
+        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
+        */
+       this_rq->last_load_update_tick = jiffies;
+       __update_cpu_load(this_rq, this_rq->load.weight, 1);
 
        calc_load_account_active(this_rq);
 }
index 8a39fa3..66e4576 100644 (file)
@@ -4735,7 +4735,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 
                raw_spin_lock_irq(&this_rq->lock);
                update_rq_clock(this_rq);
-               update_cpu_load(this_rq);
+               update_idle_cpu_load(this_rq);
                raw_spin_unlock_irq(&this_rq->lock);
 
                rebalance_domains(balance_cpu, CPU_IDLE);
index 0a51882..be92bfe 100644 (file)
@@ -23,7 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *pick_next_task_idle(struct rq *rq)
 {
        schedstat_inc(rq, sched_goidle);
-       calc_load_account_idle(rq);
        return rq->idle;
 }
 
index 4b85a7a..f1eb182 100644 (file)
@@ -31,8 +31,6 @@ unsigned long                 tick_nsec;
 u64                            tick_length;
 static u64                     tick_length_base;
 
-static struct hrtimer          leap_timer;
-
 #define MAX_TICKADJ            500LL           /* usecs */
 #define MAX_TICKADJ_SCALED \
        (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -350,60 +348,60 @@ void ntp_clear(void)
 }
 
 /*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second.
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ * Also handles leap second processing, and returns leap offset
  */
-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
+int second_overflow(unsigned long secs)
 {
-       enum hrtimer_restart res = HRTIMER_NORESTART;
-
-       write_seqlock(&xtime_lock);
+       int leap = 0;
+       s64 delta;
 
+       /*
+        * Leap second processing. If in leap-insert state at the end of the
+        * day, the system clock is set back one second; if in leap-delete
+        * state, the system clock is set ahead one second.
+        */
        switch (time_state) {
        case TIME_OK:
+               if (time_status & STA_INS)
+                       time_state = TIME_INS;
+               else if (time_status & STA_DEL)
+                       time_state = TIME_DEL;
                break;
        case TIME_INS:
-               timekeeping_leap_insert(-1);
-               time_state = TIME_OOP;
-               printk(KERN_NOTICE
-                       "Clock: inserting leap second 23:59:60 UTC\n");
-               hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
-               res = HRTIMER_RESTART;
+               if (secs % 86400 == 0) {
+                       leap = -1;
+                       time_state = TIME_OOP;
+                       time_tai++;
+                       printk(KERN_NOTICE
+                               "Clock: inserting leap second 23:59:60 UTC\n");
+               }
                break;
        case TIME_DEL:
-               timekeeping_leap_insert(1);
-               time_tai--;
-               time_state = TIME_WAIT;
-               printk(KERN_NOTICE
-                       "Clock: deleting leap second 23:59:59 UTC\n");
+               if ((secs + 1) % 86400 == 0) {
+                       leap = 1;
+                       time_tai--;
+                       time_state = TIME_WAIT;
+                       printk(KERN_NOTICE
+                               "Clock: deleting leap second 23:59:59 UTC\n");
+               }
                break;
        case TIME_OOP:
-               time_tai++;
                time_state = TIME_WAIT;
-               /* fall through */
+               break;
+
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
                break;
        }
 
-       write_sequnlock(&xtime_lock);
-
-       return res;
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- */
-void second_overflow(void)
-{
-       s64 delta;
 
        /* Bump the maxerror field */
        time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -423,23 +421,25 @@ void second_overflow(void)
        pps_dec_valid();
 
        if (!time_adjust)
-               return;
+               goto out;
 
        if (time_adjust > MAX_TICKADJ) {
                time_adjust -= MAX_TICKADJ;
                tick_length += MAX_TICKADJ_SCALED;
-               return;
+               goto out;
        }
 
        if (time_adjust < -MAX_TICKADJ) {
                time_adjust += MAX_TICKADJ;
                tick_length -= MAX_TICKADJ_SCALED;
-               return;
+               goto out;
        }
 
        tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
                                                         << NTP_SCALE_SHIFT;
        time_adjust = 0;
+out:
+       return leap;
 }
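
With the hrtimer gone, the leap decision is made purely from the wall-clock second: in TIME_INS state the function returns -1 exactly when secs lands on a UTC day boundary, and the caller steps xtime back so that 23:59:59 repeats. A standalone check of that boundary test, using the real 2012-06-30 leap second as the example instant:

#include <stdio.h>

/* Leap offset to apply when the wall clock reaches 'secs', assuming an
 * insertion is armed (TIME_INS): -1 exactly at the UTC day boundary. */
static int leap_offset_at(unsigned long secs)
{
        return (secs % 86400 == 0) ? -1 : 0;
}

int main(void)
{
        /* 2012-07-01 00:00:00 UTC, the instant after the June 2012 leap second. */
        unsigned long midnight = 1341100800UL;

        printf("at %lu: leap=%d\n", midnight - 1, leap_offset_at(midnight - 1)); /*  0 */
        printf("at %lu: leap=%d\n", midnight,     leap_offset_at(midnight));     /* -1 */
        return 0;
}
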
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -501,27 +501,6 @@ static void notify_cmos_timer(void)
 static inline void notify_cmos_timer(void) { }
 #endif
 
-/*
- * Start the leap seconds timer:
- */
-static inline void ntp_start_leap_timer(struct timespec *ts)
-{
-       long now = ts->tv_sec;
-
-       if (time_status & STA_INS) {
-               time_state = TIME_INS;
-               now += 86400 - now % 86400;
-               hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-
-               return;
-       }
-
-       if (time_status & STA_DEL) {
-               time_state = TIME_DEL;
-               now += 86400 - (now + 1) % 86400;
-               hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-       }
-}
 
 /*
  * Propagate a new txc->status value into the NTP state:
@@ -546,22 +525,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
        time_status &= STA_RONLY;
        time_status |= txc->status & ~STA_RONLY;
 
-       switch (time_state) {
-       case TIME_OK:
-               ntp_start_leap_timer(ts);
-               break;
-       case TIME_INS:
-       case TIME_DEL:
-               time_state = TIME_OK;
-               ntp_start_leap_timer(ts);
-       case TIME_WAIT:
-               if (!(time_status & (STA_INS | STA_DEL)))
-                       time_state = TIME_OK;
-               break;
-       case TIME_OOP:
-               hrtimer_restart(&leap_timer);
-               break;
-       }
 }
 /*
  * Called with the xtime lock held, so we can access and modify
@@ -643,9 +606,6 @@ int do_adjtimex(struct timex *txc)
                    (txc->tick <  900000/USER_HZ ||
                     txc->tick > 1100000/USER_HZ))
                        return -EINVAL;
-
-               if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
-                       hrtimer_cancel(&leap_timer);
        }
 
        if (txc->modes & ADJ_SETOFFSET) {
@@ -967,6 +927,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
 void __init ntp_init(void)
 {
        ntp_clear();
-       hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       leap_timer.function = ntp_leap_second;
 }
index c923640..793548c 100644 (file)
@@ -430,6 +430,7 @@ void tick_nohz_stop_sched_tick(int inidle)
                 */
                if (!ts->tick_stopped) {
                        select_nohz_load_balancer(1);
+                       calc_load_enter_idle();
 
                        ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                        ts->tick_stopped = 1;
@@ -548,6 +549,7 @@ void tick_nohz_restart_sched_tick(void)
        /* Update jiffies first */
        select_nohz_load_balancer(0);
        tick_do_update_jiffies64(now);
+       update_cpu_load_nohz();
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
@@ -563,6 +565,7 @@ void tick_nohz_restart_sched_tick(void)
                account_idle_ticks(ticks);
 #endif
 
+       calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick
index 2378413..03e67d4 100644 (file)
@@ -161,23 +161,43 @@ static struct timespec xtime __attribute__ ((aligned (16)));
 static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
 static struct timespec total_sleep_time;
 
+/* Offset clock monotonic -> clock realtime */
+static ktime_t offs_real;
+
+/* Offset clock monotonic -> clock boottime */
+static ktime_t offs_boot;
+
 /*
  * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
  */
 static struct timespec raw_time;
 
-/* flag for if timekeeping is suspended */
-int __read_mostly timekeeping_suspended;
+/* must hold write on xtime_lock */
+static void update_rt_offset(void)
+{
+       struct timespec tmp, *wtm = &wall_to_monotonic;
 
-/* must hold xtime_lock */
-void timekeeping_leap_insert(int leapsecond)
+       set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+       offs_real = timespec_to_ktime(tmp);
+}
+
+/* must hold write on xtime_lock */
+static void timekeeping_update(bool clearntp)
 {
-       xtime.tv_sec += leapsecond;
-       wall_to_monotonic.tv_sec -= leapsecond;
-       update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-                       timekeeper.mult);
+       if (clearntp) {
+               timekeeper.ntp_error = 0;
+               ntp_clear();
+       }
+       update_rt_offset();
+       update_vsyscall(&xtime, &wall_to_monotonic,
+                        timekeeper.clock, timekeeper.mult);
 }
 
+
+
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -375,11 +395,7 @@ int do_settimeofday(const struct timespec *tv)
 
        xtime = *tv;
 
-       timekeeper.ntp_error = 0;
-       ntp_clear();
-
-       update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-                               timekeeper.mult);
+       timekeeping_update(true);
 
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -412,11 +428,7 @@ int timekeeping_inject_offset(struct timespec *ts)
        xtime = timespec_add(xtime, *ts);
        wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
 
-       timekeeper.ntp_error = 0;
-       ntp_clear();
-
-       update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-                               timekeeper.mult);
+       timekeeping_update(true);
 
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -591,6 +603,7 @@ void __init timekeeping_init(void)
        }
        set_normalized_timespec(&wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
+       update_rt_offset();
        total_sleep_time.tv_sec = 0;
        total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -599,6 +612,12 @@ void __init timekeeping_init(void)
 /* time in seconds when suspend began */
 static struct timespec timekeeping_suspend_time;
 
+static void update_sleep_time(struct timespec t)
+{
+       total_sleep_time = t;
+       offs_boot = timespec_to_ktime(t);
+}
+
 /**
  * __timekeeping_inject_sleeptime - Internal function to add sleep interval
  * @delta: pointer to a timespec delta value
@@ -616,7 +635,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
 
        xtime = timespec_add(xtime, *delta);
        wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
-       total_sleep_time = timespec_add(total_sleep_time, *delta);
+       update_sleep_time(timespec_add(total_sleep_time, *delta));
 }
 
 
@@ -645,10 +664,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 
        __timekeeping_inject_sleeptime(delta);
 
-       timekeeper.ntp_error = 0;
-       ntp_clear();
-       update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-                               timekeeper.mult);
+       timekeeping_update(true);
 
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -683,6 +699,7 @@ static void timekeeping_resume(void)
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
+       timekeeping_update(false);
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
        touch_softlockup_watchdog();
@@ -942,9 +959,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 
        timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
        while (timekeeper.xtime_nsec >= nsecps) {
+               int leap;
                timekeeper.xtime_nsec -= nsecps;
                xtime.tv_sec++;
-               second_overflow();
+               leap = second_overflow(xtime.tv_sec);
+               xtime.tv_sec += leap;
+               wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
        /* Accumulate raw time */
@@ -1050,14 +1072,17 @@ static void update_wall_time(void)
         * xtime.tv_nsec isn't larger than NSEC_PER_SEC
         */
        if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
+               int leap;
                xtime.tv_nsec -= NSEC_PER_SEC;
                xtime.tv_sec++;
-               second_overflow();
+               leap = second_overflow(xtime.tv_sec);
+               xtime.tv_sec += leap;
+               wall_to_monotonic.tv_sec -= leap;
+               if (leap)
+                       clock_was_set_delayed();
        }
 
-       /* check to see if there is a new clocksource to use */
-       update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
-                               timekeeper.mult);
+       timekeeping_update(false);
 }
 
 /**
@@ -1216,6 +1241,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
        } while (read_seqretry(&xtime_lock, seq));
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @real:      pointer to storage for monotonic -> realtime offset
+ * @_boot:     pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets
+ * Called from hrtimer_interrupt() or retrigger_next_event()
+ */
+ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
+{
+       ktime_t now;
+       unsigned int seq;
+       u64 secs, nsecs;
+
+       do {
+               seq = read_seqbegin(&xtime_lock);
+
+               secs = xtime.tv_sec;
+               nsecs = xtime.tv_nsec;
+               nsecs += timekeeping_get_ns();
+               /* If arch requires, add in gettimeoffset() */
+               nsecs += arch_gettimeoffset();
+
+               *real = offs_real;
+               *boot = offs_boot;
+       } while (read_seqretry(&xtime_lock, seq));
+
+       now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+       now = ktime_sub(now, *real);
+       return now;
+}
+#endif
+
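
offs_real above is simply the negated wall_to_monotonic and offs_boot the accumulated sleep time, so realtime = monotonic + offs_real and boottime = monotonic + offs_boot. A small sketch of that arithmetic with plain timespecs; the offset values below are made up:

#include <stdio.h>
#include <time.h>

/* Illustrative counterparts of what update_rt_offset()/update_sleep_time() cache. */
static struct timespec offs_real;       /* monotonic -> realtime */
static struct timespec offs_boot;       /* monotonic -> boottime */

static struct timespec ts_add(struct timespec a, struct timespec b)
{
        struct timespec r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

        if (r.tv_nsec >= 1000000000L) {
                r.tv_sec++;
                r.tv_nsec -= 1000000000L;
        }
        return r;
}

int main(void)
{
        struct timespec mono = { 5000, 250000000 };      /* ~83 minutes of uptime */

        offs_real = (struct timespec){ 1344290000, 0 };  /* made-up boot date     */
        offs_boot = (struct timespec){ 120, 0 };         /* two minutes suspended */

        struct timespec real = ts_add(mono, offs_real);
        struct timespec boot = ts_add(mono, offs_boot);

        printf("mono %ld.%09ld -> real %ld.%09ld, boot %ld.%09ld\n",
               (long)mono.tv_sec, mono.tv_nsec, (long)real.tv_sec, real.tv_nsec,
               (long)boot.tv_sec, boot.tv_nsec);
        return 0;
}
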
 /**
  * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
  */
index 697e49d..5638104 100644 (file)
@@ -2541,10 +2541,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
                if (cpumask_test_cpu(cpu, tracing_cpumask) &&
                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
+                       ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
                }
                if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
+                       ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
                }
        }
        arch_spin_unlock(&ftrace_max_lock);
@@ -3456,6 +3458,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .pages          = pages_def,
                .partial        = partial_def,
                .nr_pages       = 0, /* This gets updated below. */
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
@@ -3527,7 +3530,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        ret = splice_to_pipe(pipe, &spd);
 out:
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 
 out_err:
@@ -4017,6 +4020,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages          = pages_def,
                .partial        = partial_def,
+               .nr_pages_max   = PIPE_DEF_BUFFERS,
                .flags          = flags,
                .ops            = &buffer_pipe_buf_ops,
                .spd_release    = buffer_spd_release,
@@ -4104,7 +4108,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        }
 
        ret = splice_to_pipe(pipe, &spd);
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 out:
        return ret;
 }
index 7947e16..a650bee 100644 (file)
@@ -3586,6 +3586,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        return notifier_from_errno(0);
 }
 
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered as a high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+                                              unsigned long action,
+                                              void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_CANCELED:
+       case CPU_DOWN_FAILED:
+       case CPU_ONLINE:
+               return workqueue_cpu_callback(nfb, action, hcpu);
+       }
+       return NOTIFY_OK;
+}
+
+/*
+ * Workqueues should be brought down after normal priority CPU notifiers.
+ * This will be registered as a low priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+                                                unsigned long action,
+                                                void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DOWN_PREPARE:
+       case CPU_DYING:
+       case CPU_POST_DEAD:
+               return workqueue_cpu_callback(nfb, action, hcpu);
+       }
+       return NOTIFY_OK;
+}
+
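
The reason for the split is that a notifier entry has a single priority for every event: to run before everyone else on bring-up and after everyone else on tear-down, two registrations are needed, each ignoring the other direction's events. A toy priority-ordered chain illustrating the ordering; the names and priorities are invented:

#include <stdio.h>

enum ev { EV_UP, EV_DOWN };

struct notifier {
        int priority;                    /* higher runs earlier, both directions */
        const char *name;
        void (*fn)(const char *name, enum ev e);
};

static void wq_up(const char *name, enum ev e)
{
        if (e == EV_UP)                  /* ignore tear-down events */
                printf("%s: prepare per-cpu workers\n", name);
}

static void wq_down(const char *name, enum ev e)
{
        if (e == EV_DOWN)                /* ignore bring-up events */
                printf("%s: flush and park workers\n", name);
}

static void other(const char *name, enum ev e)
{
        printf("%s: %s\n", name, e == EV_UP ? "up work" : "down work");
}

/* Kept sorted by priority, highest first, like a notifier chain. */
static struct notifier chain[] = {
        { 20, "workqueue-up (high prio)", wq_up },
        { 10, "driver (normal prio)",     other },
        {  0, "workqueue-down (low prio)", wq_down },
};

static void notify(enum ev e)
{
        for (unsigned i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
                chain[i].fn(chain[i].name, e);
}

int main(void)
{
        puts("cpu up:");
        notify(EV_UP);          /* workqueue callback runs first */
        puts("cpu down:");
        notify(EV_DOWN);        /* workqueue callback runs last  */
        return 0;
}
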
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
@@ -3779,7 +3814,8 @@ static int __init init_workqueues(void)
        unsigned int cpu;
        int i;
 
-       cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+       cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
+       cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
        /* initialize gcwqs */
        for_each_gcwq_cpu(cpu) {
index 8fb8a40..46973fb 100644 (file)
@@ -372,7 +372,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                }
 
                if (!cc->sync)
-                       mode |= ISOLATE_CLEAN;
+                       mode |= ISOLATE_ASYNC_MIGRATE;
 
                /* Try isolate the page */
                if (__isolate_lru_page(page, mode, 0) != 0)
@@ -577,7 +577,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
-                               cc->sync);
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -592,8 +592,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_lru_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
+                       if (err == -ENOMEM) {
+                               ret = COMPACT_PARTIAL;
+                               goto out;
+                       }
                }
-
        }
 
 out:
index 03c5b0e..556858c 100644 (file)
@@ -500,10 +500,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
        struct page *page;
 
        if (cpuset_do_page_mem_spread()) {
-               get_mems_allowed();
-               n = cpuset_mem_spread_node();
-               page = alloc_pages_exact_node(n, gfp, 0);
-               put_mems_allowed();
+               unsigned int cpuset_mems_cookie;
+               do {
+                       cpuset_mems_cookie = get_mems_allowed();
+                       n = cpuset_mem_spread_node();
+                       page = alloc_pages_exact_node(n, gfp, 0);
+               } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
+
                return page;
        }
        return alloc_pages(gfp, 0);
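
get_mems_allowed()/put_mems_allowed() now act like a seqcount read section: the cookie is taken before the allocation, and the attempt is retried only if the allocation failed and the allowed-node mask changed underneath it. A self-contained sketch of that retry shape; the cpuset helpers here are stand-ins, not the kernel ones.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-task mems_allowed sequence counter. */
static atomic_uint mems_seq;

static unsigned get_mems_allowed(void)
{
        return atomic_load_explicit(&mems_seq, memory_order_acquire);
}

/* Returns true when the mask was stable across the critical section. */
static bool put_mems_allowed(unsigned cookie)
{
        return atomic_load_explicit(&mems_seq, memory_order_acquire) == cookie;
}

/* Some allocation that may transiently fail while the mask is rewritten. */
static void *try_alloc(void)
{
        return NULL; /* pretend the attempt fails */
}

int main(void)
{
        void *page;
        unsigned cookie;
        int attempts = 0;

        do {
                cookie = get_mems_allowed();
                page = try_alloc();
                attempts++;
                /* Only loop if the allocation failed AND the mask changed. */
        } while (!put_mems_allowed(cookie) && !page && attempts < 10);

        printf("done after %d attempt(s), page=%p\n", attempts, page);
        return 0;
}
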
index 5f5c545..b1e1bad 100644 (file)
@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
  */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
+static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
+{
+       bool free = (spool->count == 0) && (spool->used_hpages == 0);
+
+       spin_unlock(&spool->lock);
+
+       /* If no pages are used, and no other handles to the subpool
+        * remain, free the subpool. */
+       if (free)
+               kfree(spool);
+}
+
+struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
+{
+       struct hugepage_subpool *spool;
+
+       spool = kmalloc(sizeof(*spool), GFP_KERNEL);
+       if (!spool)
+               return NULL;
+
+       spin_lock_init(&spool->lock);
+       spool->count = 1;
+       spool->max_hpages = nr_blocks;
+       spool->used_hpages = 0;
+
+       return spool;
+}
+
+void hugepage_put_subpool(struct hugepage_subpool *spool)
+{
+       spin_lock(&spool->lock);
+       BUG_ON(!spool->count);
+       spool->count--;
+       unlock_or_release_subpool(spool);
+}
+
+static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
+                                     long delta)
+{
+       int ret = 0;
+
+       if (!spool)
+               return 0;
+
+       spin_lock(&spool->lock);
+       if ((spool->used_hpages + delta) <= spool->max_hpages) {
+               spool->used_hpages += delta;
+       } else {
+               ret = -ENOMEM;
+       }
+       spin_unlock(&spool->lock);
+
+       return ret;
+}
+
+static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
+                                      long delta)
+{
+       if (!spool)
+               return;
+
+       spin_lock(&spool->lock);
+       spool->used_hpages -= delta;
+       /* If hugetlbfs_put_super couldn't free spool due to
+       * an outstanding quota reference, free it now. */
+       unlock_or_release_subpool(spool);
+}
+
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
+{
+       return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
+static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
+{
+       return subpool_inode(vma->vm_file->f_dentry->d_inode);
+}
+
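
A simplified single-threaded model of the subpool lifecycle the helpers above implement: pages are charged against max_hpages, the mount's handle is dropped with hugepage_put_subpool(), and whichever side drops the last use frees the structure. The spinlock and error paths are elided and the function names below are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct subpool {
        long count;          /* handle references (e.g. the mount)      */
        long max_hpages;     /* limit set at mount time                 */
        long used_hpages;    /* pages currently charged to this subpool */
};

static void release_if_unused(struct subpool *sp)
{
        if (sp->count == 0 && sp->used_hpages == 0)
                free(sp);    /* whoever drops the last use frees it */
}

static struct subpool *new_subpool(long nr)
{
        struct subpool *sp = calloc(1, sizeof(*sp));

        if (sp) {
                sp->count = 1;
                sp->max_hpages = nr;
        }
        return sp;
}

static int get_pages(struct subpool *sp, long delta)
{
        if (sp->used_hpages + delta > sp->max_hpages)
                return -1;                 /* over the mount's page limit */
        sp->used_hpages += delta;
        return 0;
}

static void put_pages(struct subpool *sp, long delta)
{
        sp->used_hpages -= delta;
        release_if_unused(sp);
}

static void put_subpool(struct subpool *sp)
{
        sp->count--;
        release_if_unused(sp);
}

int main(void)
{
        struct subpool *sp = new_subpool(2);

        printf("charge 2 -> %d\n", get_pages(sp, 2));  /*  0: within limit */
        printf("charge 1 -> %d\n", get_pages(sp, 1));  /* -1: would exceed */
        put_subpool(sp);      /* umount: pages still charged, not freed   */
        put_pages(sp, 2);     /* last pages returned: subpool freed here  */
        return 0;
}
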
 /*
  * Region tracking -- allows tracking of reservations and instantiated pages
  *                    across the pages in a mapping.
@@ -460,8 +538,10 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
+       unsigned int cpuset_mems_cookie;
 
-       get_mems_allowed();
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol, &nodemask);
        /*
@@ -488,10 +568,15 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
                        }
                }
        }
-err:
+
        mpol_cond_put(mpol);
-       put_mems_allowed();
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
        return page;
+
+err:
+       mpol_cond_put(mpol);
+       return NULL;
 }
 
 static void update_and_free_page(struct hstate *h, struct page *page)
@@ -533,9 +618,9 @@ static void free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct address_space *mapping;
+       struct hugepage_subpool *spool =
+               (struct hugepage_subpool *)page_private(page);
 
-       mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
@@ -551,8 +636,7 @@ static void free_huge_page(struct page *page)
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
-       if (mapping)
-               hugetlb_put_quota(mapping, 1);
+       hugepage_subpool_put_pages(spool, 1);
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
@@ -966,11 +1050,12 @@ static void return_unused_surplus_pages(struct hstate *h,
 /*
  * Determine if the huge page at addr within the vma has an associated
  * reservation.  Where it does not we will need to logically increase
- * reservation and actually increase quota before an allocation can occur.
- * Where any new reservation would be required the reservation change is
- * prepared, but not committed.  Once the page has been quota'd allocated
- * an instantiated the change should be committed via vma_commit_reservation.
- * No action is required on failure.
+ * reservation and actually increase subpool usage before an allocation
+ * can occur.  Where any new reservation would be required the
+ * reservation change is prepared, but not committed.  Once the page
+ * has been allocated from the subpool and instantiated the change should
+ * be committed via vma_commit_reservation.  No action is required on
+ * failure.
  */
 static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
@@ -1019,24 +1104,24 @@ static void vma_commit_reservation(struct hstate *h,
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
 {
+       struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
-       struct address_space *mapping = vma->vm_file->f_mapping;
-       struct inode *inode = mapping->host;
        long chg;
 
        /*
-        * Processes that did not create the mapping will have no reserves and
-        * will not have accounted against quota. Check that the quota can be
-        * made before satisfying the allocation
-        * MAP_NORESERVE mappings may also need pages and quota allocated
-        * if no reserve mapping overlaps.
+        * Processes that did not create the mapping will have no
+        * reserves and will not have accounted against subpool
+        * limit. Check that the subpool limit can be made before
+        * satisfying the allocation. MAP_NORESERVE mappings may also
+        * need pages and subpool limit allocated if no reserve
+        * mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
-               if (hugetlb_get_quota(inode->i_mapping, chg))
+               if (hugepage_subpool_get_pages(spool, chg))
                        return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
@@ -1046,12 +1131,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        if (!page) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
-                       hugetlb_put_quota(inode->i_mapping, chg);
+                       hugepage_subpool_put_pages(spool, chg);
                        return ERR_PTR(-VM_FAULT_SIGBUS);
                }
        }
 
-       set_page_private(page, (unsigned long) mapping);
+       set_page_private(page, (unsigned long)spool);
 
        vma_commit_reservation(h, vma, addr);
 
@@ -2081,6 +2166,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
        struct hstate *h = hstate_vma(vma);
        struct resv_map *reservations = vma_resv_map(vma);
+       struct hugepage_subpool *spool = subpool_vma(vma);
        unsigned long reserve;
        unsigned long start;
        unsigned long end;
@@ -2096,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 
                if (reserve) {
                        hugetlb_acct_memory(h, -reserve);
-                       hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
+                       hugepage_subpool_put_pages(spool, reserve);
                }
        }
 }
@@ -2326,7 +2412,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
        address = address & huge_page_mask(h);
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
                + (vma->vm_pgoff >> PAGE_SHIFT);
-       mapping = (struct address_space *)page_private(page);
+       mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
 
        /*
         * Take the mapping lock for the duration of the table walk. As
@@ -2865,11 +2951,12 @@ int hugetlb_reserve_pages(struct inode *inode,
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
+       struct hugepage_subpool *spool = subpool_inode(inode);
 
        /*
         * Only apply hugepage reservation if asked. At fault time, an
         * attempt will be made for VM_NORESERVE to allocate a page
-        * and filesystem quota without using reserves
+        * without using reserves
         */
        if (vm_flags & VM_NORESERVE)
                return 0;
@@ -2898,19 +2985,19 @@ int hugetlb_reserve_pages(struct inode *inode,
                goto out_err;
        }
 
-       /* There must be enough filesystem quota for the mapping */
-       if (hugetlb_get_quota(inode->i_mapping, chg)) {
+       /* There must be enough pages in the subpool for the mapping */
+       if (hugepage_subpool_get_pages(spool, chg)) {
                ret = -ENOSPC;
                goto out_err;
        }
 
        /*
         * Check enough hugepages are available for the reservation.
-        * Hand back the quota if there are not
+        * Hand the pages back to the subpool if there are not
         */
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
-               hugetlb_put_quota(inode->i_mapping, chg);
+               hugepage_subpool_put_pages(spool, chg);
                goto out_err;
        }
 
@@ -2938,12 +3025,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
        struct hstate *h = hstate_inode(inode);
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
+       struct hugepage_subpool *spool = subpool_inode(inode);
 
        spin_lock(&inode->i_lock);
        inode->i_blocks -= (blocks_per_huge_page(h) * freed);
        spin_unlock(&inode->i_lock);
 
-       hugetlb_put_quota(inode->i_mapping, (chg - freed));
+       hugepage_subpool_put_pages(spool, (chg - freed));
        hugetlb_acct_memory(h, -(chg - freed));
 }
 
index 74bf193..23d3a6b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>
 #include <linux/sched.h>
 #include <linux/ksm.h>
+#include <linux/file.h>
 
 /*
  * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -197,14 +198,16 @@ static long madvise_remove(struct vm_area_struct *vma,
        struct address_space *mapping;
        loff_t offset, endoff;
        int error;
+       struct file *f;
 
        *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
 
        if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
                return -EINVAL;
 
-       if (!vma->vm_file || !vma->vm_file->f_mapping
-               || !vma->vm_file->f_mapping->host) {
+       f = vma->vm_file;
+
+       if (!f || !f->f_mapping || !f->f_mapping->host) {
                        return -EINVAL;
        }
 
@@ -218,9 +221,16 @@ static long madvise_remove(struct vm_area_struct *vma,
        endoff = (loff_t)(end - vma->vm_start - 1)
                        + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 
-       /* vmtruncate_range needs to take i_mutex */
+       /*
+        * vmtruncate_range may need to take i_mutex.  We need to
+        * explicitly grab a reference because the vma (and hence the
+        * vma's reference to the file) can go away as soon as we drop
+        * mmap_sem.
+        */
+       get_file(f);
        up_read(&current->mm->mmap_sem);
        error = vmtruncate_range(mapping->host, offset, endoff);
+       fput(f);
        down_read(&current->mm->mmap_sem);
        return error;
 }
index 06d3479..5bd5bb1 100644 (file)
@@ -1427,8 +1427,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
        /* Keep page count to indicate a given hugepage is isolated. */
 
        list_add(&hpage->lru, &pagelist);
-       ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
-                               true);
+       ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
+                               MIGRATE_SYNC);
        if (ret) {
                struct page *page1, *page2;
                list_for_each_entry_safe(page1, page2, &pagelist, lru)
@@ -1557,7 +1557,7 @@ int soft_offline_page(struct page *page, int flags)
                                            page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
-                                                               0, true);
+                                                       false, MIGRATE_SYNC);
                if (ret) {
                        putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
index 2168489..6629faf 100644 (file)
@@ -809,7 +809,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                }
                /* this function returns # of failed pages */
                ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
-                                                               true, true);
+                                                       true, MIGRATE_SYNC);
                if (ret)
                        putback_lru_pages(&source);
        }
index b26aae2..c0007f9 100644 (file)
@@ -942,7 +942,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, dest,
-                                                               false, true);
+                                                       false, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }
@@ -1843,18 +1843,24 @@ struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                unsigned long addr, int node)
 {
-       struct mempolicy *pol = get_vma_policy(current, vma, addr);
+       struct mempolicy *pol;
        struct zonelist *zl;
        struct page *page;
+       unsigned int cpuset_mems_cookie;
+
+retry_cpuset:
+       pol = get_vma_policy(current, vma, addr);
+       cpuset_mems_cookie = get_mems_allowed();
 
-       get_mems_allowed();
        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
 
                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
                mpol_cond_put(pol);
                page = alloc_page_interleave(gfp, order, nid);
-               put_mems_allowed();
+               if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+                       goto retry_cpuset;
+
                return page;
        }
        zl = policy_zonelist(gfp, pol, node);
@@ -1865,7 +1871,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                struct page *page =  __alloc_pages_nodemask(gfp, order,
                                                zl, policy_nodemask(gfp, pol));
                __mpol_put(pol);
-               put_mems_allowed();
+               if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+                       goto retry_cpuset;
                return page;
        }
        /*
@@ -1873,7 +1880,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
         */
        page = __alloc_pages_nodemask(gfp, order, zl,
                                      policy_nodemask(gfp, pol));
-       put_mems_allowed();
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
        return page;
 }
 
@@ -1900,11 +1908,14 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
        struct mempolicy *pol = current->mempolicy;
        struct page *page;
+       unsigned int cpuset_mems_cookie;
 
        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
                pol = &default_policy;
 
-       get_mems_allowed();
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
+
        /*
         * No reference counting needed for current->mempolicy
         * nor system default_policy
@@ -1915,7 +1926,10 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                page = __alloc_pages_nodemask(gfp, order,
                                policy_zonelist(gfp, pol, numa_node_id()),
                                policy_nodemask(gfp, pol));
-       put_mems_allowed();
+
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
+
        return page;
 }
 EXPORT_SYMBOL(alloc_pages_current);
index 177aca4..180d97f 100644 (file)
@@ -220,6 +220,56 @@ out:
        pte_unmap_unlock(ptep, ptl);
 }
 
+#ifdef CONFIG_BLOCK
+/* Returns true if all buffers are successfully locked */
+static bool buffer_migrate_lock_buffers(struct buffer_head *head,
+                                                       enum migrate_mode mode)
+{
+       struct buffer_head *bh = head;
+
+       /* Simple case, sync compaction */
+       if (mode != MIGRATE_ASYNC) {
+               do {
+                       get_bh(bh);
+                       lock_buffer(bh);
+                       bh = bh->b_this_page;
+
+               } while (bh != head);
+
+               return true;
+       }
+
+       /* async case, we cannot block on lock_buffer so use trylock_buffer */
+       do {
+               get_bh(bh);
+               if (!trylock_buffer(bh)) {
+                       /*
+                        * We failed to lock the buffer and cannot stall in
+                        * async migration. Release the taken locks
+                        */
+                       struct buffer_head *failed_bh = bh;
+                       put_bh(failed_bh);
+                       bh = head;
+                       while (bh != failed_bh) {
+                               unlock_buffer(bh);
+                               put_bh(bh);
+                               bh = bh->b_this_page;
+                       }
+                       return false;
+               }
+
+               bh = bh->b_this_page;
+       } while (bh != head);
+       return true;
+}
+#else
+static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
+                                                       enum migrate_mode mode)
+{
+       return true;
+}
+#endif /* CONFIG_BLOCK */
+
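
The async path above is a classic all-or-nothing trylock: take every buffer lock without blocking and, on the first failure, unwind the ones already held. The same pattern in generic pthread form over an array (the migration code walks the circular b_this_page list instead):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NLOCKS 4

static pthread_mutex_t locks[NLOCKS];

/* Try to take every lock without blocking; on failure release the ones
 * already held and report that the caller should back off and retry. */
static bool trylock_all(void)
{
        for (int i = 0; i < NLOCKS; i++) {
                if (pthread_mutex_trylock(&locks[i]) != 0) {
                        while (--i >= 0)
                                pthread_mutex_unlock(&locks[i]);
                        return false;
                }
        }
        return true;
}

static void unlock_all(void)
{
        for (int i = 0; i < NLOCKS; i++)
                pthread_mutex_unlock(&locks[i]);
}

int main(void)
{
        for (int i = 0; i < NLOCKS; i++)
                pthread_mutex_init(&locks[i], NULL);

        if (trylock_all()) {
                puts("got all locks without blocking");
                unlock_all();
        } else {
                puts("contended: backed off cleanly");
        }
        return 0;
}
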
 /*
  * Replace the page in the mapping.
  *
@@ -229,7 +279,8 @@ out:
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
-               struct page *newpage, struct page *page)
+               struct page *newpage, struct page *page,
+               struct buffer_head *head, enum migrate_mode mode)
 {
        int expected_count;
        void **pslot;
@@ -258,6 +309,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
+       /*
+        * In the async migration case of moving a page with buffers, lock the
+        * buffers using trylock before the mapping is moved. If the mapping
+        * were moved first and we then failed to lock the buffers, we could
+        * not move the mapping back due to an elevated page count, and would
+        * have to block waiting on other references to be dropped.
+        */
+       if (mode == MIGRATE_ASYNC && head &&
+                       !buffer_migrate_lock_buffers(head, mode)) {
+               page_unfreeze_refs(page, expected_count);
+               spin_unlock_irq(&mapping->tree_lock);
+               return -EAGAIN;
+       }
+
        /*
         * Now we know that no one else is looking at the page.
         */
@@ -415,13 +480,14 @@ EXPORT_SYMBOL(fail_migrate_page);
  * Pages are locked upon entry and exit.
  */
 int migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page)
+               struct page *newpage, struct page *page,
+               enum migrate_mode mode)
 {
        int rc;
 
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       rc = migrate_page_move_mapping(mapping, newpage, page);
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 
        if (rc)
                return rc;
@@ -438,28 +504,28 @@ EXPORT_SYMBOL(migrate_page);
  * exist.
  */
 int buffer_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page)
+               struct page *newpage, struct page *page, enum migrate_mode mode)
 {
        struct buffer_head *bh, *head;
        int rc;
 
        if (!page_has_buffers(page))
-               return migrate_page(mapping, newpage, page);
+               return migrate_page(mapping, newpage, page, mode);
 
        head = page_buffers(page);
 
-       rc = migrate_page_move_mapping(mapping, newpage, page);
+       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 
        if (rc)
                return rc;
 
-       bh = head;
-       do {
-               get_bh(bh);
-               lock_buffer(bh);
-               bh = bh->b_this_page;
-
-       } while (bh != head);
+       /*
+        * In the async case, migrate_page_move_mapping locked the buffers
+        * with an IRQ-safe spinlock held. In the sync case, the buffers
+        * need to be locked now
+        */
+       if (mode != MIGRATE_ASYNC)
+               BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 
        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
@@ -536,10 +602,14 @@ static int writeout(struct address_space *mapping, struct page *page)
  * Default handling if a filesystem does not provide a migration function.
  */
 static int fallback_migrate_page(struct address_space *mapping,
-       struct page *newpage, struct page *page)
+       struct page *newpage, struct page *page, enum migrate_mode mode)
 {
-       if (PageDirty(page))
+       if (PageDirty(page)) {
+               /* Only writeback pages in full synchronous migration */
+               if (mode != MIGRATE_SYNC)
+                       return -EBUSY;
                return writeout(mapping, page);
+       }
 
        /*
         * Buffers may be managed in a filesystem specific way.
@@ -549,7 +619,7 @@ static int fallback_migrate_page(struct address_space *mapping,
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
 
-       return migrate_page(mapping, newpage, page);
+       return migrate_page(mapping, newpage, page, mode);
 }
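
enum migrate_mode replaces the old bool sync with three levels, and the hunks in this file gate progressively more expensive operations on it: blocking lock attempts need at least sync-light, while writing dirty pages out or waiting for writeback is reserved for full sync. A tiny illustrative helper capturing those rules; the enum values match the patch, but the helper functions do not exist in the kernel.

#include <stdbool.h>
#include <stdio.h>

enum migrate_mode {
        MIGRATE_ASYNC,          /* never block                          */
        MIGRATE_SYNC_LIGHT,     /* may block, but no writeback/waiting  */
        MIGRATE_SYNC,           /* may block and write dirty pages out  */
};

/* Rules the callers above encode inline. */
static bool may_block_on_lock(enum migrate_mode m)  { return m != MIGRATE_ASYNC; }
static bool may_writeback(enum migrate_mode m)      { return m == MIGRATE_SYNC;  }
static bool may_wait_writeback(enum migrate_mode m) { return m == MIGRATE_SYNC;  }

int main(void)
{
        enum migrate_mode m = MIGRATE_SYNC_LIGHT;

        printf("sync-light: block on lock=%d, write out dirty=%d, wait on writeback=%d\n",
               may_block_on_lock(m), may_writeback(m), may_wait_writeback(m));
        return 0;
}
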
 
 /*
@@ -564,7 +634,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-                                       int remap_swapcache, bool sync)
+                               int remap_swapcache, enum migrate_mode mode)
 {
        struct address_space *mapping;
        int rc;
@@ -585,29 +655,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 
        mapping = page_mapping(page);
        if (!mapping)
-               rc = migrate_page(mapping, newpage, page);
-       else {
+               rc = migrate_page(mapping, newpage, page, mode);
+       else if (mapping->a_ops->migratepage)
                /*
-                * Do not writeback pages if !sync and migratepage is
-                * not pointing to migrate_page() which is nonblocking
-                * (swapcache/tmpfs uses migratepage = migrate_page).
+                * Most pages have a mapping and most filesystems provide a
+                * migratepage callback. Anonymous pages are part of swap
+                * space which also has its own migratepage callback. This
+                * is the most common path for page migration.
                 */
-               if (PageDirty(page) && !sync &&
-                   mapping->a_ops->migratepage != migrate_page)
-                       rc = -EBUSY;
-               else if (mapping->a_ops->migratepage)
-                       /*
-                        * Most pages have a mapping and most filesystems
-                        * should provide a migration function. Anonymous
-                        * pages are part of swap space which also has its
-                        * own migration function. This is the most common
-                        * path for page migration.
-                        */
-                       rc = mapping->a_ops->migratepage(mapping,
-                                                       newpage, page);
-               else
-                       rc = fallback_migrate_page(mapping, newpage, page);
-       }
+               rc = mapping->a_ops->migratepage(mapping,
+                                               newpage, page, mode);
+       else
+               rc = fallback_migrate_page(mapping, newpage, page, mode);
 
        if (rc) {
                newpage->mapping = NULL;
@@ -622,7 +681,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 }
 
 static int __unmap_and_move(struct page *page, struct page *newpage,
-                               int force, bool offlining, bool sync)
+                       int force, bool offlining, enum migrate_mode mode)
 {
        int rc = -EAGAIN;
        int remap_swapcache = 1;
@@ -631,7 +690,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
        struct anon_vma *anon_vma = NULL;
 
        if (!trylock_page(page)) {
-               if (!force || !sync)
+               if (!force || mode == MIGRATE_ASYNC)
                        goto out;
 
                /*
@@ -677,10 +736,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
        if (PageWriteback(page)) {
                /*
-                * For !sync, there is no point retrying as the retry loop
-                * is expected to be too short for PageWriteback to be cleared
+                * Only in the case of a full synchronous migration is it
+                * necessary to wait for PageWriteback. In the async case,
+                * the retry loop is too short and in the sync-light case,
+                * the overhead of stalling is too much.
                 */
-               if (!sync) {
+               if (mode != MIGRATE_SYNC) {
                        rc = -EBUSY;
                        goto uncharge;
                }
@@ -751,7 +812,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
 skip_unmap:
        if (!page_mapped(page))
-               rc = move_to_new_page(newpage, page, remap_swapcache, sync);
+               rc = move_to_new_page(newpage, page, remap_swapcache, mode);
 
        if (rc && remap_swapcache)
                remove_migration_ptes(page, page);
@@ -774,7 +835,8 @@ out:
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                       struct page *page, int force, bool offlining, bool sync)
+                       struct page *page, int force, bool offlining,
+                       enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -792,7 +854,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                if (unlikely(split_huge_page(page)))
                        goto out;
 
-       rc = __unmap_and_move(page, newpage, force, offlining, sync);
+       rc = __unmap_and_move(page, newpage, force, offlining, mode);
 out:
        if (rc != -EAGAIN) {
                /*
@@ -840,7 +902,8 @@ out:
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
                                unsigned long private, struct page *hpage,
-                               int force, bool offlining, bool sync)
+                               int force, bool offlining,
+                               enum migrate_mode mode)
 {
        int rc = 0;
        int *result = NULL;
@@ -853,7 +916,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        rc = -EAGAIN;
 
        if (!trylock_page(hpage)) {
-               if (!force || !sync)
+               if (!force || mode != MIGRATE_SYNC)
                        goto out;
                lock_page(hpage);
        }
@@ -864,7 +927,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
        if (!page_mapped(hpage))
-               rc = move_to_new_page(new_hpage, hpage, 1, sync);
+               rc = move_to_new_page(new_hpage, hpage, 1, mode);
 
        if (rc)
                remove_migration_ptes(hpage, hpage);
@@ -907,7 +970,7 @@ out:
  */
 int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               bool sync)
+               enum migrate_mode mode)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -928,7 +991,7 @@ int migrate_pages(struct list_head *from,
 
                        rc = unmap_and_move(get_new_page, private,
                                                page, pass > 2, offlining,
-                                               sync);
+                                               mode);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -958,7 +1021,7 @@ out:
 
 int migrate_huge_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               bool sync)
+               enum migrate_mode mode)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -975,7 +1038,7 @@ int migrate_huge_pages(struct list_head *from,
 
                        rc = unmap_and_move_huge_page(get_new_page,
                                        private, page, pass > 2, offlining,
-                                       sync);
+                                       mode);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -1104,7 +1167,7 @@ set_status:
        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
-                               (unsigned long)pm, 0, true);
+                               (unsigned long)pm, 0, MIGRATE_SYNC);
                if (err)
                        putback_lru_pages(&pagelist);
        }
index 485be89..065dbe8 100644 (file)
@@ -1886,14 +1886,20 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, unsigned long *did_some_progress,
-       bool sync_migration)
+       int migratetype, bool sync_migration,
+       bool *deferred_compaction,
+       unsigned long *did_some_progress)
 {
        struct page *page;
 
-       if (!order || compaction_deferred(preferred_zone))
+       if (!order)
                return NULL;
 
+       if (compaction_deferred(preferred_zone)) {
+               *deferred_compaction = true;
+               return NULL;
+       }
+
        current->flags |= PF_MEMALLOC;
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
                                                nodemask, sync_migration);
@@ -1921,7 +1927,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 * but not enough to satisfy watermarks.
                 */
                count_vm_event(COMPACTFAIL);
-               defer_compaction(preferred_zone);
+
+               /*
+                * As async compaction considers a subset of pageblocks, only
+                * defer if the failure was a sync compaction failure.
+                */
+               if (sync_migration)
+                       defer_compaction(preferred_zone);
 
                cond_resched();
        }
@@ -1933,8 +1945,9 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-       int migratetype, unsigned long *did_some_progress,
-       bool sync_migration)
+       int migratetype, bool sync_migration,
+       bool *deferred_compaction,
+       unsigned long *did_some_progress)
 {
        return NULL;
 }
@@ -2084,6 +2097,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        unsigned long pages_reclaimed = 0;
        unsigned long did_some_progress;
        bool sync_migration = false;
+       bool deferred_compaction = false;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -2164,12 +2178,22 @@ rebalance:
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
-                                       migratetype, &did_some_progress,
-                                       sync_migration);
+                                       migratetype, sync_migration,
+                                       &deferred_compaction,
+                                       &did_some_progress);
        if (page)
                goto got_pg;
        sync_migration = true;
 
+       /*
+        * If compaction is deferred for high-order allocations, it is because
+        * sync compaction recently failed. If this is the case and the caller
+        * has requested the system not be heavily disrupted, fail the
+        * allocation now instead of entering direct reclaim
+        */
+       if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
@@ -2232,8 +2256,9 @@ rebalance:
                                        zonelist, high_zoneidx,
                                        nodemask,
                                        alloc_flags, preferred_zone,
-                                       migratetype, &did_some_progress,
-                                       sync_migration);
+                                       migratetype, sync_migration,
+                                       &deferred_compaction,
+                                       &did_some_progress);
                if (page)
                        goto got_pg;
        }
@@ -2257,8 +2282,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zone *preferred_zone;
-       struct page *page;
+       struct page *page = NULL;
        int migratetype = allocflags_to_migratetype(gfp_mask);
+       unsigned int cpuset_mems_cookie;
 
        gfp_mask &= gfp_allowed_mask;
 
@@ -2277,15 +2303,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
-       get_mems_allowed();
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
+
        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx,
                                nodemask ? : &cpuset_current_mems_allowed,
                                &preferred_zone);
-       if (!preferred_zone) {
-               put_mems_allowed();
-               return NULL;
-       }
+       if (!preferred_zone)
+               goto out;
 
        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -2295,9 +2321,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
-       put_mems_allowed();
 
        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+
+out:
+       /*
+        * When updating a task's mems_allowed, it is possible to race with
+        * parallel threads in such a way that an allocation can fail while
+        * the mask is being updated. If a page allocation is about to fail,
+        * check if the cpuset changed during allocation and if so, retry.
+        */
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+               goto retry_cpuset;
+
        return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2521,13 +2557,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 bool skip_free_areas_node(unsigned int flags, int nid)
 {
        bool ret = false;
+       unsigned int cpuset_mems_cookie;
 
        if (!(flags & SHOW_MEM_FILTER_NODES))
                goto out;
 
-       get_mems_allowed();
-       ret = !node_isset(nid, cpuset_current_mems_allowed);
-       put_mems_allowed();
+       do {
+               cpuset_mems_cookie = get_mems_allowed();
+               ret = !node_isset(nid, cpuset_current_mems_allowed);
+       } while (!put_mems_allowed(cpuset_mems_cookie));
 out:
        return ret;
 }
@@ -3407,25 +3445,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
                if (page_to_nid(page) != zone_to_nid(zone))
                        continue;
 
-               /* Blocks with reserved pages will never free, skip them. */
-               block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
-               if (pageblock_is_reserved(pfn, block_end_pfn))
-                       continue;
-
                block_migratetype = get_pageblock_migratetype(page);
 
-               /* If this block is reserved, account for it */
-               if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
-                       reserve--;
-                       continue;
-               }
+               /* Only test what is necessary when the reserves are not met */
+               if (reserve > 0) {
+                       /*
+                        * Blocks with reserved pages will never free, skip
+                        * them.
+                        */
+                       block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
+                       if (pageblock_is_reserved(pfn, block_end_pfn))
+                               continue;
 
-               /* Suitable for reserving if this block is movable */
-               if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
-                       set_pageblock_migratetype(page, MIGRATE_RESERVE);
-                       move_freepages_block(zone, page, MIGRATE_RESERVE);
-                       reserve--;
-                       continue;
+                       /* If this block is reserved, account for it */
+                       if (block_migratetype == MIGRATE_RESERVE) {
+                               reserve--;
+                               continue;
+                       }
+
+                       /* Suitable for reserving if this block is movable */
+                       if (block_migratetype == MIGRATE_MOVABLE) {
+                               set_pageblock_migratetype(page,
+                                                       MIGRATE_RESERVE);
+                               move_freepages_block(zone, page,
+                                                       MIGRATE_RESERVE);
+                               reserve--;
+                               continue;
+                       }
                }
 
                /*
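
The page allocator hunks above, and the slab/slub hunks further down, convert get_mems_allowed()/put_mems_allowed() into a cookie-based retry protocol: put_mems_allowed(cookie) now reports whether the cpuset mems_allowed mask stayed stable while the allocation ran, and the caller retries only if it both raced with an update and failed. A minimal stand-alone sketch of that pattern, with the seqcount mechanics reduced to a plain counter (the real helpers live in the cpuset headers, which are not part of this diff):

#include <stdio.h>

static unsigned int mems_allowed_seq;	/* bumped on every cpuset update */

static unsigned int get_mems_allowed(void)
{
	return mems_allowed_seq;	/* sample the current generation */
}

static int put_mems_allowed(unsigned int cookie)
{
	/* true if no cpuset update raced with the allocation */
	return cookie == mems_allowed_seq;
}

int main(void)
{
	void *page = NULL;
	unsigned int cookie;

	do {
		cookie = get_mems_allowed();
		/* ... attempt the allocation under the sampled mask ... */
	} while (!put_mems_allowed(cookie) && !page);

	printf("allocation %s\n", page ? "succeeded" : "failed with a stable mask");
	return 0;
}
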
index 6b8e9fb..a6f0e98 100644 (file)
@@ -1359,6 +1359,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = PIPE_DEF_BUFFERS,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
                .spd_release = spd_release_page,
@@ -1447,7 +1448,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
 
        if (error > 0) {
                *ppos += error;
index 83311c9..cd3ab93 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3267,12 +3267,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
        if (in_interrupt() || (flags & __GFP_THISNODE))
                return NULL;
        nid_alloc = nid_here = numa_mem_id();
-       get_mems_allowed();
        if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
                nid_alloc = cpuset_slab_spread_node();
        else if (current->mempolicy)
                nid_alloc = slab_node(current->mempolicy);
-       put_mems_allowed();
        if (nid_alloc != nid_here)
                return ____cache_alloc_node(cachep, flags, nid_alloc);
        return NULL;
@@ -3295,14 +3293,17 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
        enum zone_type high_zoneidx = gfp_zone(flags);
        void *obj = NULL;
        int nid;
+       unsigned int cpuset_mems_cookie;
 
        if (flags & __GFP_THISNODE)
                return NULL;
 
-       get_mems_allowed();
-       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
+retry_cpuset:
+       cpuset_mems_cookie = get_mems_allowed();
+       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+
 retry:
        /*
         * Look through allowed nodes for objects available
@@ -3355,7 +3356,9 @@ retry:
                        }
                }
        }
-       put_mems_allowed();
+
+       if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
+               goto retry_cpuset;
        return obj;
 }
 
index af47188..5710788 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1582,6 +1582,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(flags);
        void *object;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * The defrag ratio allows a configuration of the tradeoffs between
@@ -1605,23 +1606,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;
 
-       get_mems_allowed();
-       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-               struct kmem_cache_node *n;
-
-               n = get_node(s, zone_to_nid(zone));
-
-               if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-                               n->nr_partial > s->min_partial) {
-                       object = get_partial_node(s, n, c);
-                       if (object) {
-                               put_mems_allowed();
-                               return object;
+       do {
+               cpuset_mems_cookie = get_mems_allowed();
+               zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+                       struct kmem_cache_node *n;
+
+                       n = get_node(s, zone_to_nid(zone));
+
+                       if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+                                       n->nr_partial > s->min_partial) {
+                               object = get_partial_node(s, n, c);
+                               if (object) {
+                                       /*
+                                        * Return the object even if
+                                        * put_mems_allowed indicated that
+                                        * the cpuset mems_allowed was
+                                        * updated in parallel. It's a
+                                        * harmless race between the alloc
+                                        * and the cpuset update.
+                                        */
+                                       put_mems_allowed(cpuset_mems_cookie);
+                                       return object;
+                               }
                        }
                }
-       }
-       put_mems_allowed();
+       } while (!put_mems_allowed(cpuset_mems_cookie));
 #endif
        return NULL;
 }
index fbe2d2c..48febd7 100644 (file)
@@ -715,7 +715,13 @@ static enum page_references page_check_references(struct page *page,
                 */
                SetPageReferenced(page);
 
-               if (referenced_page)
+               if (referenced_page || referenced_ptes > 1)
+                       return PAGEREF_ACTIVATE;
+
+               /*
+                * Activate file-backed executable pages after first usage.
+                */
+               if (vm_flags & VM_EXEC)
                        return PAGEREF_ACTIVATE;
 
                return PAGEREF_KEEP;
@@ -1061,8 +1067,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
 
        ret = -EBUSY;
 
-       if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
-               return ret;
+       /*
+        * To minimise LRU disruption, the caller can indicate that it only
+        * wants to isolate pages it will be able to operate on without
+        * blocking - clean pages for the most part.
+        *
+        * ISOLATE_CLEAN means that only clean pages should be isolated. This
+        * is used by reclaim when it cannot write to backing storage
+        *
+        * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
+        * that it is possible to migrate without blocking
+        */
+       if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+               /* All the caller can do on PageWriteback is block */
+               if (PageWriteback(page))
+                       return ret;
+
+               if (PageDirty(page)) {
+                       struct address_space *mapping;
+
+                       /* ISOLATE_CLEAN means only clean pages */
+                       if (mode & ISOLATE_CLEAN)
+                               return ret;
+
+                       /*
+                        * Only pages without mappings or that have a
+                        * ->migratepage callback are possible to migrate
+                        * without blocking
+                        */
+                       mapping = page_mapping(page);
+                       if (mapping && !mapping->a_ops->migratepage)
+                               return ret;
+               }
+       }
 
        if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
                return ret;
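
A stand-alone sketch of the isolation policy added above, with the mode flag values and the page state reduced to illustrative stand-ins; has_migratepage here stands in for "the page has no mapping, or its mapping provides a ->migratepage callback", which is the condition the hunk actually tests.

#include <stdbool.h>
#include <stdio.h>

#define ISOLATE_CLEAN		(1 << 0)	/* illustrative values */
#define ISOLATE_ASYNC_MIGRATE	(1 << 1)

struct page_state {
	bool writeback;
	bool dirty;
	bool has_migratepage;
};

static bool can_isolate(int mode, const struct page_state *p)
{
	if (mode & (ISOLATE_CLEAN | ISOLATE_ASYNC_MIGRATE)) {
		/* all the caller could do on a writeback page is block */
		if (p->writeback)
			return false;
		if (p->dirty) {
			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return false;
			/* dirty pages migrate without blocking only with no
			 * mapping or via a ->migratepage callback */
			if (!p->has_migratepage)
				return false;
		}
	}
	return true;
}

int main(void)
{
	struct page_state dirty_plain = { .dirty = true };
	struct page_state dirty_cb = { .dirty = true, .has_migratepage = true };

	printf("async migrate, dirty, no callback: %d\n",
	       can_isolate(ISOLATE_ASYNC_MIGRATE, &dirty_plain));
	printf("async migrate, dirty, callback:    %d\n",
	       can_isolate(ISOLATE_ASYNC_MIGRATE, &dirty_cb));
	return 0;
}
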
@@ -1178,7 +1215,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                         * anon page which don't already have a swap slot is
                         * pointless.
                         */
-                       if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+                       if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
                            !PageSwapCache(cursor_page))
                                break;
 
@@ -1874,7 +1911,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * latencies, so it's better to scan a minimum amount there as
         * well.
         */
-       if (scanning_global_lru(sc) && current_is_kswapd())
+       if (scanning_global_lru(sc) && current_is_kswapd() &&
+           zone->all_unreclaimable)
                force_scan = true;
        if (!scanning_global_lru(sc))
                force_scan = true;
@@ -2012,8 +2050,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
-                               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       if (nr_swap_pages > 0)
+               inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
@@ -2088,6 +2127,42 @@ restart:
        throttle_vm_writeout(sc->gfp_mask);
 }
 
+/* Returns true if compaction should go ahead for a high-order request */
+static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
+{
+       unsigned long balance_gap, watermark;
+       bool watermark_ok;
+
+       /* Do not consider compaction for orders reclaim is meant to satisfy */
+       if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
+               return false;
+
+       /*
+        * Compaction takes time to run and there are potentially other
+        * callers using the pages just freed. Continue reclaiming until
+        * there is a buffer of free pages available to give compaction
+        * a reasonable chance of completing and allocating the page
+        */
+       balance_gap = min(low_wmark_pages(zone),
+               (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+                       KSWAPD_ZONE_BALANCE_GAP_RATIO);
+       watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
+       watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
+
+       /*
+        * If compaction is deferred, reclaim up to a point where
+        * compaction will have a chance of success when re-enabled
+        */
+       if (compaction_deferred(zone))
+               return watermark_ok;
+
+       /* If compaction is not ready to start, keep reclaiming */
+       if (!compaction_suitable(zone, sc->order))
+               return false;
+
+       return watermark_ok;
+}
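
A worked example of the watermark arithmetic in compaction_ready(), using assumed numbers only (a 1 GiB zone of 4 KiB pages, a balance-gap ratio of 100, low/high watermarks of 1000/1200 pages, and an order-4 request); none of these values come from this diff.

#include <stdio.h>

#define KSWAPD_ZONE_BALANCE_GAP_RATIO	100UL	/* value assumed for the example */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long present_pages = 262144;	/* 1 GiB of 4 KiB pages */
	unsigned long low_wmark = 1000;
	unsigned long high_wmark = 1200;
	int order = 4;

	unsigned long balance_gap = min_ul(low_wmark,
		(present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
			KSWAPD_ZONE_BALANCE_GAP_RATIO);
	unsigned long watermark = high_wmark + balance_gap + (2UL << order);

	/* reclaim keeps going until roughly this many pages are free,
	 * then compaction is given a chance instead */
	printf("balance_gap = %lu pages, watermark = %lu pages\n",
	       balance_gap, watermark);
	return 0;
}
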
+
 /*
  * This is the direct reclaim path, for page-allocating processes.  We only
  * try to reclaim pages from zones which will satisfy the caller's allocation
@@ -2105,8 +2180,9 @@ restart:
  * scan then give up on it.
  *
  * This function returns true if a zone is being reclaimed for a costly
- * high-order allocation and compaction is either ready to begin or deferred.
- * This indicates to the caller that it should retry the allocation or fail.
+ * high-order allocation and compaction is ready to begin. This indicates to
+ * the caller that it should consider retrying the allocation instead of
+ * further reclaim.
  */
 static bool shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
@@ -2115,7 +2191,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
-       bool should_abort_reclaim = false;
+       bool aborted_reclaim = false;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2140,10 +2216,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                                 * noticeable problem, like transparent huge page
                                 * allocations.
                                 */
-                               if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
-                                       (compaction_suitable(zone, sc->order) ||
-                                        compaction_deferred(zone))) {
-                                       should_abort_reclaim = true;
+                               if (compaction_ready(zone, sc)) {
+                                       aborted_reclaim = true;
                                        continue;
                                }
                        }
@@ -2165,7 +2239,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                shrink_zone(priority, zone, sc);
        }
 
-       return should_abort_reclaim;
+       return aborted_reclaim;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2219,8 +2293,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        struct zoneref *z;
        struct zone *zone;
        unsigned long writeback_threshold;
+       bool aborted_reclaim;
 
-       get_mems_allowed();
        delayacct_freepages_start();
 
        if (scanning_global_lru(sc))
@@ -2230,8 +2304,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token(sc->mem_cgroup);
-               if (shrink_zones(priority, zonelist, sc))
-                       break;
+               aborted_reclaim = shrink_zones(priority, zonelist, sc);
 
                /*
                 * Don't shrink slabs when reclaiming memory from
@@ -2285,7 +2358,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 out:
        delayacct_freepages_end();
-       put_mems_allowed();
 
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
@@ -2298,6 +2370,10 @@ out:
        if (oom_killer_disabled)
                return 0;
 
+       /* Aborted reclaim to try compaction? don't OOM, then */
+       if (aborted_reclaim)
+               return 1;
+
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
@@ -2824,7 +2900,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
                 * them before going back to sleep.
                 */
                set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
-               schedule();
+
+               if (!kthread_should_stop())
+                       schedule();
+
                set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
        } else {
                if (remaining)
@@ -3090,14 +3169,17 @@ int kswapd_run(int nid)
 }
 
 /*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined.  Caller must
+ * hold lock_memory_hotplug().
  */
 void kswapd_stop(int nid)
 {
        struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
 
-       if (kswapd)
+       if (kswapd) {
                kthread_stop(kswapd);
+               NODE_DATA(nid)->kswapd = NULL;
+       }
 }
 
 static int __init kswapd_init(void)
index f961cc5..da587ad 100644 (file)
@@ -619,6 +619,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
                /* packet needs to be linearized to access the TT changes */
                if (skb_linearize(skb) < 0)
                        goto out;
+               /* skb_linearize() possibly changed skb->data */
+               tt_query = (struct tt_query_packet *)skb->data;
 
                if (is_my_mac(tt_query->dst))
                        handle_tt_response(bat_priv, tt_query);
index 5f09a57..088af45 100644 (file)
@@ -1816,10 +1816,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
 {
        struct tt_local_entry *tt_local_entry = NULL;
        struct tt_global_entry *tt_global_entry = NULL;
-       bool ret = true;
+       bool ret = false;
 
        if (!atomic_read(&bat_priv->ap_isolation))
-               return false;
+               goto out;
 
        tt_local_entry = tt_local_hash_find(bat_priv, dst);
        if (!tt_local_entry)
@@ -1829,10 +1829,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
        if (!tt_global_entry)
                goto out;
 
-       if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+       if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
                goto out;
 
-       ret = false;
+       ret = true;
 
 out:
        if (tt_global_entry)
index f603e5b..f3f75ad 100644 (file)
@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
                return -ENOMEM;
 
        dev_net_set(dev, net);
+       dev->rtnl_link_ops = &br_link_ops;
 
        res = register_netdev(dev);
        if (res)
index a1daf82..cbf9ccd 100644 (file)
@@ -211,7 +211,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static struct rtnl_link_ops br_link_ops __read_mostly = {
+struct rtnl_link_ops br_link_ops __read_mostly = {
        .kind           = "bridge",
        .priv_size      = sizeof(struct net_bridge),
        .setup          = br_dev_setup,
index 93264df..b9bba8f 100644 (file)
@@ -536,6 +536,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
 #endif
 
 /* br_netlink.c */
+extern struct rtnl_link_ops br_link_ops;
 extern int br_netlink_init(void);
 extern void br_netlink_fini(void);
 extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
index cde1b4a..46cca3a 100644 (file)
@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (err < 0)
                goto free_skb;
 
-       /* to be able to check the received tx sock reference in raw_rcv() */
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
-
        skb->dev = dev;
        skb->sk  = sk;
 
index 1cbddc9..5738654 100644 (file)
@@ -2079,25 +2079,6 @@ static int dev_gso_segment(struct sk_buff *skb, int features)
        return 0;
 }
 
-/*
- * Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested or the sk-reference
- * is needed on driver level for other reasons, e.g. see net/can/raw.c
- */
-static inline void skb_orphan_try(struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-
-       if (sk && !skb_shinfo(skb)->tx_flags) {
-               /* skb_tx_hash() wont be able to get sk.
-                * We copy sk_hash into skb->rxhash
-                */
-               if (!skb->rxhash)
-                       skb->rxhash = sk->sk_hash;
-               skb_orphan(skb);
-       }
-}
-
 static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 {
        return ((features & NETIF_F_GEN_CSUM) ||
@@ -2182,8 +2163,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                if (!list_empty(&ptype_all))
                        dev_queue_xmit_nit(skb, dev);
 
-               skb_orphan_try(skb);
-
                features = netif_skb_features(skb);
 
                if (vlan_tx_tag_present(skb) &&
@@ -2293,7 +2272,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
-               hash = (__force u16) skb->protocol ^ skb->rxhash;
+               hash = (__force u16) skb->protocol;
        hash = jhash_1word(hash, hashrnd);
 
        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
index 2b587ec..2367246 100644 (file)
@@ -1672,6 +1672,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCSUM:
        case ETHTOOL_GTXCSUM:
        case ETHTOOL_GSG:
+       case ETHTOOL_GSSET_INFO:
        case ETHTOOL_GSTRINGS:
        case ETHTOOL_GTSO:
        case ETHTOOL_GPERMADDR:
index ab0633f..db4bb7a 100644 (file)
@@ -351,22 +351,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
-       int total_len, eth_len, ip_len, udp_len;
+       int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
 
        udp_len = len + sizeof(*udph);
-       ip_len = eth_len = udp_len + sizeof(*iph);
-       total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
+       ip_len = udp_len + sizeof(*iph);
+       total_len = ip_len + LL_RESERVED_SPACE(np->dev);
 
-       skb = find_skb(np, total_len, total_len - len);
+       skb = find_skb(np, total_len + np->dev->needed_tailroom,
+                      total_len - len);
        if (!skb)
                return;
 
        skb_copy_to_linear_data(skb, msg, len);
-       skb->len += len;
+       skb_put(skb, len);
 
        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
index 2ec200d..af9c3c6 100644 (file)
@@ -1663,6 +1663,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
+               .nr_pages_max = MAX_SKB_FRAGS,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
@@ -1709,7 +1710,7 @@ done:
                lock_sock(sk);
        }
 
-       splice_shrink_spd(pipe, &spd);
+       splice_shrink_spd(&spd);
        return ret;
 }
 
index b23f174..8d095b9 100644 (file)
@@ -1497,6 +1497,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
        gfp_t gfp_mask;
        long timeo;
        int err;
+       int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+       err = -EMSGSIZE;
+       if (npages > MAX_SKB_FRAGS)
+               goto failure;
 
        gfp_mask = sk->sk_allocation;
        if (gfp_mask & __GFP_WAIT)
@@ -1515,14 +1520,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                        skb = alloc_skb(header_len, gfp_mask);
                        if (skb) {
-                               int npages;
                                int i;
 
                                /* No pages, we're done... */
                                if (!data_len)
                                        break;
 
-                               npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                                skb->truesize += data_len;
                                skb_shinfo(skb)->nr_frags = npages;
                                for (i = 0; i < npages; i++) {
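
The sock.c hunk above moves the fragment-count calculation to the top of sock_alloc_send_pskb() so that an oversized request fails early with EMSGSIZE instead of overrunning the fragment array later. A stand-alone sketch of the check, with PAGE_SIZE and MAX_SKB_FRAGS set to typical values rather than taken from any particular configuration:

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MAX_SKB_FRAGS	17	/* typical value, assumed here */

static int check_data_len(unsigned long data_len)
{
	unsigned long npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	if (npages > MAX_SKB_FRAGS)
		return -EMSGSIZE;	/* reject before building the skb */
	return 0;
}

int main(void)
{
	printf("64 KiB:  %d\n", check_data_len(64UL * 1024));	/* 16 pages, ok */
	printf("128 KiB: %d\n", check_data_len(128UL * 1024));	/* 32 pages, rejected */
	return 0;
}
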
index 9726927..32e6ca2 100644 (file)
@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
 
                if (th->syn) {
+                       if (th->fin)
+                               goto discard;
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
index 059b9d9..2e21751 100644 (file)
@@ -2881,10 +2881,6 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
        net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
 
-#ifdef CONFIG_PROC_FS
-       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
-       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
-#endif
        net->ipv6.ip6_rt_gc_expire = 30*HZ;
 
        ret = 0;
@@ -2905,10 +2901,6 @@ out_ip6_dst_ops:
 
 static void __net_exit ip6_route_net_exit(struct net *net)
 {
-#ifdef CONFIG_PROC_FS
-       proc_net_remove(net, "ipv6_route");
-       proc_net_remove(net, "rt6_stats");
-#endif
        kfree(net->ipv6.ip6_null_entry);
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        kfree(net->ipv6.ip6_prohibit_entry);
@@ -2917,11 +2909,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
        dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
+static int __net_init ip6_route_net_init_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+       proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+#endif
+       return 0;
+}
+
+static void __net_exit ip6_route_net_exit_late(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+       proc_net_remove(net, "ipv6_route");
+       proc_net_remove(net, "rt6_stats");
+#endif
+}
+
 static struct pernet_operations ip6_route_net_ops = {
        .init = ip6_route_net_init,
        .exit = ip6_route_net_exit,
 };
 
+static struct pernet_operations ip6_route_net_late_ops = {
+       .init = ip6_route_net_init_late,
+       .exit = ip6_route_net_exit_late,
+};
+
 static struct notifier_block ip6_route_dev_notifier = {
        .notifier_call = ip6_route_dev_notify,
        .priority = 0,
@@ -2971,19 +2985,25 @@ int __init ip6_route_init(void)
        if (ret)
                goto xfrm6_init;
 
+       ret = register_pernet_subsys(&ip6_route_net_late_ops);
+       if (ret)
+               goto fib6_rules_init;
+
        ret = -ENOBUFS;
        if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
            __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
        ret = register_netdevice_notifier(&ip6_route_dev_notifier);
        if (ret)
-               goto fib6_rules_init;
+               goto out_register_late_subsys;
 
 out:
        return ret;
 
+out_register_late_subsys:
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
        fib6_rules_cleanup();
 xfrm6_init:
@@ -3002,6 +3022,7 @@ out_kmem_cache:
 void ip6_route_cleanup(void)
 {
        unregister_netdevice_notifier(&ip6_route_dev_notifier);
+       unregister_pernet_subsys(&ip6_route_net_late_ops);
        fib6_rules_cleanup();
        xfrm6_fini();
        fib6_gc_cleanup();
index 274d150..cf98d62 100644 (file)
@@ -380,7 +380,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                        skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = ETH_P_AF_IUCV;
-       skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
                return -ENOMEM;
index d2726a7..3c55f63 100644 (file)
@@ -167,6 +167,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
                if (dev) {
                        unregister_netdev(dev);
                        spriv->dev = NULL;
+                       module_put(THIS_MODULE);
                }
        }
 }
@@ -254,6 +255,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
        if (rc < 0)
                goto out_del_dev;
 
+       __module_get(THIS_MODULE);
        /* Must be done after register_netdev() */
        strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
index 2fbbe1f..6c7e609 100644 (file)
@@ -515,10 +515,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
-               if (connected)
+               if (connected) {
                        sk_setup_caps(sk, &rt->dst);
-               else
-                       dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+               } else {
+                       skb_dst_set(skb, &rt->dst);
+                       goto xmit;
+               }
        }
 
        /* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -526,6 +528,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
         */
        skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
        /* Queue the packet to IP for output */
        rc = ip_queue_xmit(skb, &inet->cork.fl);
        rcu_read_unlock();
index 064d20f..cda4875 100644 (file)
@@ -2389,7 +2389,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
         * frames that we didn't handle, including returning unknown
         * ones. For all other modes we will return them to the sender,
         * setting the 0x80 bit in the action category, as required by
-        * 802.11-2007 7.3.1.11.
+        * 802.11-2012 9.24.4.
         * Newer versions of hostapd shall also use the management frame
         * registration mechanisms, but older ones still use cooked
         * monitor interfaces so push all frames there.
@@ -2399,6 +2399,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
                return RX_DROP_MONITOR;
 
+       if (is_multicast_ether_addr(mgmt->da))
+               return RX_DROP_MONITOR;
+
        /* do not return rejected action frames */
        if (mgmt->u.action.category & 0x80)
                return RX_DROP_UNUSABLE;
index 96633f5..12b6a80 100644 (file)
@@ -86,7 +86,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
        nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
        data += 2;
 
-       nfca_poll->nfcid1_len = *data++;
+       nfca_poll->nfcid1_len = min_t(__u8, *data++, sizeof(nfca_poll->nfcid1));
 
        nfc_dbg("sens_res 0x%x, nfcid1_len %d",
                nfca_poll->sens_res,
@@ -111,7 +111,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
 
        switch (ntf->rf_interface_type) {
        case NCI_RF_INTERFACE_ISO_DEP:
-               nfca_poll_iso_dep->rats_res_len = *data++;
+               nfca_poll_iso_dep->rats_res_len = min_t(__u8, *data++, 20);
                if (nfca_poll_iso_dep->rats_res_len > 0) {
                        memcpy(nfca_poll_iso_dep->rats_res,
                                data,
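
Both NFC hunks above clamp a length byte taken from the wire before it is stored or used to size a copy. A stand-alone sketch of that pattern with illustrative sizes; the extra clamp against the bytes actually available in the buffer is an addition for the demo, not part of the hunk:

#include <stdio.h>
#include <string.h>

#define RATS_RES_MAX	20	/* destination buffer size, as in the hunk */

struct iso_dep_params {
	unsigned char rats_res_len;
	unsigned char rats_res[RATS_RES_MAX];
};

static size_t min_sz(size_t a, size_t b)
{
	return a < b ? a : b;
}

static void parse_rats_res(struct iso_dep_params *p,
			   const unsigned char *data, size_t avail)
{
	unsigned char claimed = data[0];

	/* never trust the claimed length beyond the destination buffer */
	size_t len = min_sz(claimed, RATS_RES_MAX);
	/* demo-only: also cap at what the input actually contains */
	len = min_sz(len, avail - 1);

	p->rats_res_len = (unsigned char)len;
	memcpy(p->rats_res, data + 1, len);
}

int main(void)
{
	unsigned char wire[] = { 0xff, 1, 2, 3, 4 };	/* claims 255 bytes */
	struct iso_dep_params p;

	parse_rats_res(&p, wire, sizeof(wire));
	printf("copied %u bytes\n", p.rats_res_len);
	return 0;
}
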
index ee7b2b3..7a167fc 100644 (file)
@@ -52,7 +52,10 @@ static int rawsock_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
 
-       nfc_dbg("sock=%p", sock);
+       nfc_dbg("sock=%p sk=%p", sock, sk);
+
+       if (!sk)
+               return 0;
 
        sock_orphan(sk);
        sock_put(sk);
index c1c99dd..d57d05b 100644 (file)
@@ -1369,7 +1369,7 @@ static void reg_set_request_processed(void)
        spin_unlock(&reg_requests_lock);
 
        if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-               cancel_delayed_work_sync(&reg_timeout);
+               cancel_delayed_work(&reg_timeout);
 
        if (need_more_processing)
                schedule_work(&reg_work);
index d38815d..74d5292 100644 (file)
@@ -813,7 +813,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
             ntype == NL80211_IFTYPE_P2P_CLIENT))
                return -EBUSY;
 
-       if (ntype != otype) {
+       if (ntype != otype && netif_running(dev)) {
                err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
                                                    ntype);
                if (err)
index a272356..2ae4817 100755 (executable)
@@ -9,12 +9,6 @@ fi
 DEPMOD=$1
 KERNELRELEASE=$2
 
-if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
-       echo "Warning: you may need to install module-init-tools" >&2
-       echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
-       sleep 1
-fi
-
 if ! test -r System.map -a -x "$DEPMOD"; then
        exit 0
 fi
index c505fd5..c119f33 100644 (file)
@@ -868,7 +868,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        struct hdmi_spec_per_pin *per_pin;
        struct hdmi_eld *eld;
        struct hdmi_spec_per_cvt *per_cvt = NULL;
-       int pinctl;
 
        /* Validate hinfo */
        pin_idx = hinfo_to_pin_index(spec, hinfo);
@@ -904,11 +903,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        snd_hda_codec_write(codec, per_pin->pin_nid, 0,
                            AC_VERB_SET_CONNECT_SEL,
                            mux_idx);
-       pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
-                                   AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
-       snd_hda_codec_write(codec, per_pin->pin_nid, 0,
-                           AC_VERB_SET_PIN_WIDGET_CONTROL,
-                           pinctl | PIN_OUT);
        snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
 
        /* Initially set the converter's capabilities */
@@ -1147,11 +1141,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        struct hdmi_spec *spec = codec->spec;
        int pin_idx = hinfo_to_pin_index(spec, hinfo);
        hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
+       int pinctl;
 
        hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
 
        hdmi_setup_audio_infoframe(codec, pin_idx, substream);
 
+       pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+                                   AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+       snd_hda_codec_write(codec, pin_nid, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
+
        return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
 }
 
index 0005bde..191fd78 100644 (file)
@@ -5988,6 +5988,8 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
        { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
        { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+       { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+       { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
          .patch = patch_alc861 },
        { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
index 7b7a516..2b973f5 100644 (file)
@@ -4457,7 +4457,7 @@ static int stac92xx_init(struct hda_codec *codec)
                                         AC_PINCTL_IN_EN);
        for (i = 0; i < spec->num_pwrs; i++)  {
                hda_nid_t nid = spec->pwr_nids[i];
-               int pinctl, def_conf;
+               unsigned int pinctl, def_conf;
 
                /* power on when no jack detection is available */
                /* or when the VREF is used for controlling LED */
@@ -4484,7 +4484,7 @@ static int stac92xx_init(struct hda_codec *codec)
                def_conf = get_defcfg_connect(def_conf);
                /* skip any ports that don't have jacks since presence
                 * detection is useless */
-               if (def_conf != AC_JACK_PORT_NONE &&
+               if (def_conf != AC_JACK_PORT_COMPLEX ||
                    !is_jack_detectable(codec, nid)) {
                        stac_toggle_power_map(codec, nid, 1);
                        continue;
index 87d5ef1..8b48801 100644 (file)
@@ -963,9 +963,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
        }
 
 found:
-       data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
-       snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
-                     data | (pll_p << PLLP_SHIFT));
+       snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
        snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
                      pll_r << PLLR_SHIFT);
        snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
index 06a1978..16d9999 100644 (file)
 
 /* PLL registers bitfields */
 #define PLLP_SHIFT             0
+#define PLLP_MASK              7
 #define PLLQ_SHIFT             3
 #define PLLR_SHIFT             0
 #define PLLJ_SHIFT             2
index 90e93bf..0dc441c 100644 (file)
@@ -1381,7 +1381,15 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
        }
 
        list_for_each_entry(w, &card->widgets, list) {
-               list_del_init(&w->dirty);
+               switch (w->id) {
+               case snd_soc_dapm_pre:
+               case snd_soc_dapm_post:
+                       /* These widgets always need to be powered */
+                       break;
+               default:
+                       list_del_init(&w->dirty);
+                       break;
+               }
 
                if (w->power) {
                        d = w->dapm;
index 11224ed..323d4d9 100644 (file)
@@ -384,14 +384,18 @@ int main(void)
        pfd.fd = fd;
 
        while (1) {
+               struct sockaddr *addr_p = (struct sockaddr *) &addr;
+               socklen_t addr_l = sizeof(addr);
                pfd.events = POLLIN;
                pfd.revents = 0;
                poll(&pfd, 1, -1);
 
-               len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
+               len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+                               addr_p, &addr_l);
 
-               if (len < 0) {
-                       syslog(LOG_ERR, "recv failed; error:%d", len);
+               if (len < 0 || addr.nl_pid) {
+                       syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+                                       addr.nl_pid, errno, strerror(errno));
                        close(fd);
                        return -1;
                }
index 9f614b4..272407c 100644 (file)
@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
         */
        hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
+                   ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;