Linus, usually the patches that have already been included in the
-next kernel for a few weeks. The preferred way to submit big changes
is using git (the kernel's source management tool, more information
- can be found at http://git.or.cz/) but plain patches are also just
+ can be found at http://git-scm.com/) but plain patches are also just
fine.
- After two weeks a -rc1 kernel is released; it is now possible to push
only patches that do not include new features that could affect the
- cgroup.procs: list of tgids in the cgroup. This list is not
guaranteed to be sorted or free of duplicate tgids, and userspace
should sort/uniquify the list if this property is required.
- Writing a tgid into this file moves all threads with that tgid into
- this cgroup.
+ This is a read-only file, for now.
- notify_on_release flag: run the release agent on exit?
- release_agent: the path to use for release notifications (this file
exists in the top cgroup only)
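As the cgroup.procs entry above says, the list may be unsorted and may
contain duplicate tgids, so userspace has to sort/uniquify it itself. A
minimal userspace sketch (the "/dev/cgroup/foo" mount point and group
name are assumptions for illustration, not part of the text):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>

	static int cmp_pid(const void *a, const void *b)
	{
		pid_t x = *(const pid_t *)a, y = *(const pid_t *)b;
		return (x > y) - (x < y);
	}

	int main(void)
	{
		FILE *f = fopen("/dev/cgroup/foo/cgroup.procs", "r");
		pid_t pids[4096];
		size_t n = 0, i, out = 0;
		int pid;

		if (!f)
			return 1;
		while (n < 4096 && fscanf(f, "%d", &pid) == 1)
			pids[n++] = pid;
		fclose(f);

		qsort(pids, n, sizeof(pids[0]), cmp_pid);	/* sort ... */
		for (i = 0; i < n; i++)				/* ... then uniquify */
			if (!out || pids[out - 1] != pids[i])
				pids[out++] = pids[i];
		for (i = 0; i < out; i++)
			printf("%d\n", pids[i]);
		return 0;
	}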
libata.force= [LIBATA] Force configurations. The format is comma
separated list of "[ID:]VAL" where ID is
- PORT[:DEVICE]. PORT and DEVICE are decimal numbers
+ PORT[.DEVICE]. PORT and DEVICE are decimal numbers
matching port, link or device. Basically, it matches
the ATA ID string printed on console by libata. If
the whole ID part is omitted, the last PORT and DEVICE values are used.
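An illustrative boot line (the values are hypothetical, chosen only to
make the "[ID:]VAL" format concrete):

	libata.force=1.00:udma4,2:1.5Gbps

This would cap device 0 on port 1 at transfer mode udma4 and limit
every link on port 2 to 1.5Gbps.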
- It cannot contain any "trivial" fixes (spelling changes,
whitespace cleanups, etc).
- It must follow the Documentation/SubmittingPatches rules.
- - It or an equivalent fix must already exist in Linus' tree. Quote the
- respective commit ID in Linus' tree in your patch submission to -stable.
+ - It or an equivalent fix must already exist in Linus' tree (upstream).
Procedure for submitting patches to the -stable tree:
- Send the patch, after verifying that it follows the above rules, to
- stable@kernel.org.
- - To have the patch automatically included in the stable tree, add the
- the tag
+ stable@kernel.org. You must note the upstream commit ID in the changelog
+ of your submission.
+ - To have the patch automatically included in the stable tree, add the tag
Cc: stable@kernel.org
in the sign-off area. Once the patch is merged it will be applied to
the stable tree without anything else needing to be done by the author
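For example, a sign-off area carrying that tag could look like this
(the name and address are placeholders):

	Signed-off-by: Some Developer <sdev@example.com>
	Cc: stable@kernel.org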
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
-L: dri-devel@lists.sourceforge.net
+L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
S: Maintained
F: drivers/gpu/drm/
S390 ZFCP DRIVER
M: Christof Schmitt <christof.schmitt@de.ibm.com>
-M: Martin Peschke <mp3@de.ibm.com>
+M: Swen Schillig <swen@vnet.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
-F: Documentation/s390/zfcpdump.txt
F: drivers/s390/scsi/zfcp_*
S390 IUCV NETWORK LAYER
{
struct kvm_memory_slot *memslot;
int r, i;
- long n, base;
+ long base;
+ unsigned long n;
unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
base = memslot->base_gfn / BITS_PER_LONG;
for (i = 0; i < n/sizeof(long); ++i) {
struct kvm_dirty_log *log)
{
int r;
- int n;
+ unsigned long n;
struct kvm_memory_slot *memslot;
int is_dirty = 0;
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
memslot = &kvm->memslots->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
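Several hunks in this series replace the open-coded dirty-bitmap size
computation with kvm_dirty_bitmap_bytes(). The helper's definition is
outside the quoted context; presumably it centralizes the very
arithmetic being removed, roughly:

	/* sketch of the assumed helper, not shown in these hunks */
	static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}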
#define MCFUART_URF_RXS 0xc0 /* Receiver status */
#endif
+#if defined(CONFIG_M5272)
+#define MCFUART_TXFIFOSIZE 25
+#else
+#define MCFUART_TXFIFOSIZE 1
+#endif
/****************************************************************************/
#endif /* mcfuart_h */
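The new MCFUART_TXFIFOSIZE constant presumably lets a UART driver queue
up to 25 characters per TX interrupt on the MCF5272, whose transmitter
has a FIFO, while the other ColdFire parts keep the single holding
register (size 1).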
cflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
cflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
cflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
-cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5271,-m5200)
+cflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
cflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
cflags-$(CONFIG_M528x) := $(call cc-option,-m528x,-m5307)
cflags-$(CONFIG_M5307) := $(call cc-option,-m5307,-m5200)
trap #0
ENTRY(ret_from_user_rt_signal)
- move #__NR_rt_sigreturn,%d0
+ movel #__NR_rt_sigreturn,%d0
trap #0
_ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */
_ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */
_ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */
- _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* reserved */
_ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */
_ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */
_ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */
struct kvm_vcpu *vcpu;
ulong ga, ga_end;
int is_dirty = 0;
- int r, n;
+ int r;
+ unsigned long n;
mutex_lock(&kvm->slots_lock);
kvm_for_each_vcpu(n, vcpu, kvm)
kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
memset(memslot->dirty_bitmap, 0, n);
}
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
__u32 ectg_available;
+ __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */
};
struct vdso_per_cpu_data {
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+ DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
lghi %r2,0
brasl %r14,arch_set_page_states
+ /* Reinitialize the channel subsystem */
+ brasl %r14,channel_subsystem_reinit
+
/* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->ntp_mult = mult;
smp_wmb();
++vdso_data->tb_update_count;
}
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,2f
ahi %r0,-1
-2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
lr %r2,%r0
- lhi %r0,1000
+ l %r0,__VDSO_NTP_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 3f
- ahi %r0,1000
+ a %r0,__VDSO_NTP_MULT(%r5)
3: alr %r0,%r2
srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,12f
ahi %r0,-1
-12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
lr %r2,%r0
- lhi %r0,1000
+ l %r0,__VDSO_NTP_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 13f
- ahi %r0,1000
+ a %r0,__VDSO_NTP_MULT(%r5)
13: alr %r0,%r2
srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
sl %r1,__VDSO_XTIME_STAMP+4(%r5)
brc 3,3f
ahi %r0,-1
-3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */
+3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */
st %r0,24(%r15)
- lhi %r0,1000
+ l %r0,__VDSO_NTP_MULT(%r5)
ltr %r1,%r1
mr %r0,%r0
jnm 4f
- ahi %r0,1000
+ a %r0,__VDSO_NTP_MULT(%r5)
4: al %r0,24(%r15)
srdl %r0,12
al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- mghi %r1,1000
+ msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
lg %r0,__VDSO_XTIME_SEC(%r5)
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- mghi %r1,1000
+ msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */
lg %r0,__VDSO_XTIME_SEC(%r5)
stck 48(%r15) /* Store TOD clock */
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- mghi %r1,1000
+ msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */
srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */
alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */
lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */
#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "linux/sched.h"
+#include "linux/slab.h"
#include "chan_kern.h"
#include "irq_kern.h"
#include "irq_user.h"
#include <errno.h>
#include <sched.h>
#include <linux/limits.h>
-#include <linux/slab.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include "kern_constants.h"
*/
#include <linux/dmi.h>
+#include <linux/module.h>
#include <asm/div64.h>
#include <asm/vmware.h>
#include <asm/x86_init.h>
return 0;
}
+EXPORT_SYMBOL(vmware_platform);
/*
* VMware hypervisor takes care of exporting a reliable TSC to the guest.
for_each_sp(pages, sp, parents, i) {
kvm_mmu_zap_page(kvm, sp);
mmu_pages_clear_parents(&parents);
+ zapped++;
}
- zapped += pages.nr;
kvm_mmu_pages_init(parent, &parents, &pages);
}
*/
if (used_pages > kvm_nr_mmu_pages) {
- while (used_pages > kvm_nr_mmu_pages) {
+ while (used_pages > kvm_nr_mmu_pages &&
+ !list_empty(&kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *page;
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- kvm_mmu_zap_page(kvm, page);
+ used_pages -= kvm_mmu_zap_page(kvm, page);
used_pages--;
}
+ kvm_nr_mmu_pages = used_pages;
kvm->arch.n_free_mmu_pages = 0;
}
else
&& !sp->role.invalid) {
pgprintk("%s: zap %lx %x\n",
__func__, gfn, sp->role.word);
- kvm_mmu_zap_page(kvm, sp);
+ if (kvm_mmu_zap_page(kvm, sp))
+ nn = bucket->first;
}
}
}
if (err)
goto free_svm;
+ err = -ENOMEM;
page = alloc_page(GFP_KERNEL);
- if (!page) {
- err = -ENOMEM;
+ if (!page)
goto uninit;
- }
- err = -ENOMEM;
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
- goto uninit;
+ goto free_page1;
nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
- goto uninit;
-
- svm->msrpm = page_address(msrpm_pages);
- svm_vcpu_init_msrpm(svm->msrpm);
+ goto free_page2;
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
- goto uninit;
+ goto free_page3;
+
svm->nested.hsave = page_address(hsave_page);
+ svm->msrpm = page_address(msrpm_pages);
+ svm_vcpu_init_msrpm(svm->msrpm);
+
svm->nested.msrpm = page_address(nested_msrpm_pages);
svm->vmcb = page_address(page);
return &svm->vcpu;
+free_page3:
+ __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+ __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+ __free_page(page);
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
} host_state;
struct {
int vm86_active;
- u8 save_iopl;
+ ulong save_rflags;
struct kvm_save_segment {
u16 selector;
unsigned long base;
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
- unsigned long rflags;
+ unsigned long rflags, save_rflags;
rflags = vmcs_readl(GUEST_RFLAGS);
- if (to_vmx(vcpu)->rmode.vm86_active)
- rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ }
return rflags;
}
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
- if (to_vmx(vcpu)->rmode.vm86_active)
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ to_vmx(vcpu)->rmode.save_rflags = rflags;
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+ }
vmcs_writel(GUEST_RFLAGS, rflags);
}
vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
flags = vmcs_readl(GUEST_RFLAGS);
- flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
- flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
- vmx->rmode.save_iopl
- = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+ vmx->rmode.save_rflags = flags;
flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL) {
- printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
- cr0, kvm_read_cr0(vcpu));
kvm_inject_gp(vcpu, 0);
return;
}
cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
- printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
kvm_inject_gp(vcpu, 0);
return;
}
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
- printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
- "and a clear PE flag\n");
kvm_inject_gp(vcpu, 0);
return;
}
int cs_db, cs_l;
if (!is_pae(vcpu)) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while PAE is disabled\n");
kvm_inject_gp(vcpu, 0);
return;
}
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l) {
- printk(KERN_DEBUG "set_cr0: #GP, start paging "
- "in long mode while CS.L == 1\n");
kvm_inject_gp(vcpu, 0);
return;
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE)) {
- printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
- "in long mode\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.cr3)) {
- printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (cr4 & X86_CR4_VMXE) {
- printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_long_mode(vcpu)) {
if (cr3 & CR3_L_MODE_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS) {
- printk(KERN_DEBUG
- "set_cr3: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
- printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
- "reserved bits\n");
kvm_inject_gp(vcpu, 0);
return;
}
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS) {
- printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
kvm_inject_gp(vcpu, 0);
return;
}
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
if (efer & efer_reserved_bits) {
- printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
- efer);
kvm_inject_gp(vcpu, 0);
return;
}
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
- printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
kvm_inject_gp(vcpu, 0);
return;
}
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
- printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
kvm_inject_gp(vcpu, 0);
return;
}
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
- printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
kvm_inject_gp(vcpu, 0);
return;
}
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
- /* only 0 or all 1s can be written to IA32_MCi_CTL */
+ /* only 0 or all 1s can be written to IA32_MCi_CTL;
+ * some Linux kernels, though, clear bit 10 in bank 4 to
+ * work around a BIOS/GART TBL issue on AMD K8s, so ignore
+ * that bit to avoid an uncaught #GP in the guest
+ */
if ((offset & 0x3) == 0 &&
- data != 0 && data != ~(u64)0)
+ data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
vcpu->arch.mce_banks[offset] = data;
break;
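Worked through: writes of 0 and of all 1s are still accepted, and the
relaxed test now also accepts 0xFFFFFFFFFFFFFBFF (all ones except
bit 10), since (data | (1 << 10)) equals ~(u64)0 in that case.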
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r, n, i;
+ int r, i;
struct kvm_memory_slot *memslot;
+ unsigned long n;
unsigned long is_dirty = 0;
unsigned long *dirty_bitmap = NULL;
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
r = -ENOMEM;
dirty_bitmap = vmalloc(n);
kvm_set_cr8(vcpu, kvm_run->cr8);
if (vcpu->arch.pio.cur_count) {
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = complete_pio(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r)
goto out;
}
int ret = 0;
u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+ u32 desc_limit;
old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
}
}
- if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+ desc_limit = get_desc_limit(&nseg_desc);
+ if (!nseg_desc.p ||
+ ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+ desc_limit < 0x2b)) {
kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
return 1;
}
struct acpi_resource_address64 *addr)
{
acpi_status status;
-
- status = acpi_resource_to_address64(resource, addr);
- if (ACPI_SUCCESS(status) &&
- (addr->resource_type == ACPI_MEMORY_RANGE ||
- addr->resource_type == ACPI_IO_RANGE) &&
- addr->address_length > 0 &&
- addr->producer_consumer == ACPI_PRODUCER) {
+ struct acpi_resource_memory24 *memory24;
+ struct acpi_resource_memory32 *memory32;
+ struct acpi_resource_fixed_memory32 *fixed_memory32;
+
+ memset(addr, 0, sizeof(*addr));
+ switch (resource->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ memory24 = &resource->data.memory24;
+ addr->resource_type = ACPI_MEMORY_RANGE;
+ addr->minimum = memory24->minimum;
+ addr->address_length = memory24->address_length;
+ addr->maximum = addr->minimum + addr->address_length - 1;
+ return AE_OK;
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ memory32 = &resource->data.memory32;
+ addr->resource_type = ACPI_MEMORY_RANGE;
+ addr->minimum = memory32->minimum;
+ addr->address_length = memory32->address_length;
+ addr->maximum = addr->minimum + addr->address_length - 1;
return AE_OK;
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ fixed_memory32 = &resource->data.fixed_memory32;
+ addr->resource_type = ACPI_MEMORY_RANGE;
+ addr->minimum = fixed_memory32->address;
+ addr->address_length = fixed_memory32->address_length;
+ addr->maximum = addr->minimum + addr->address_length - 1;
+ return AE_OK;
+ case ACPI_RESOURCE_TYPE_ADDRESS16:
+ case ACPI_RESOURCE_TYPE_ADDRESS32:
+ case ACPI_RESOURCE_TYPE_ADDRESS64:
+ status = acpi_resource_to_address64(resource, addr);
+ if (ACPI_SUCCESS(status) &&
+ (addr->resource_type == ACPI_MEMORY_RANGE ||
+ addr->resource_type == ACPI_IO_RANGE) &&
+ addr->address_length > 0) {
+ return AE_OK;
+ }
+ break;
}
return AE_ERROR;
}
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct request_queue *q = qc->scsicmd->device->request_queue;
+ unsigned long flags;
WARN_ON(!ap->ops->error_handler);
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
+ spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
}
/* okay, this error is ours */
+ memset(&tf, 0, sizeof(tf));
rc = ata_eh_read_log_10h(dev, &tag, &tf);
if (rc) {
ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
unsigned int cpu = sys_dev->id;
unsigned long flags;
struct cpufreq_policy *data;
+ struct kobject *kobj;
+ struct completion *cmp;
#ifdef CONFIG_SMP
struct sys_device *cpu_sys_dev;
unsigned int j;
dprintk("removing link\n");
cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- sysfs_remove_link(&sys_dev->kobj, "cpufreq");
+ kobj = &sys_dev->kobj;
cpufreq_cpu_put(data);
cpufreq_debug_enable_ratelimit();
unlock_policy_rwsem_write(cpu);
+ sysfs_remove_link(kobj, "cpufreq");
return 0;
}
#endif
data->governor->name, CPUFREQ_NAME_LEN);
#endif
cpu_sys_dev = get_cpu_sysdev(j);
- sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
+ kobj = &cpu_sys_dev->kobj;
+ unlock_policy_rwsem_write(cpu);
+ sysfs_remove_link(kobj, "cpufreq");
+ lock_policy_rwsem_write(cpu);
cpufreq_cpu_put(data);
}
}
if (cpufreq_driver->target)
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
- kobject_put(&data->kobj);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_write(cpu);
+ kobject_put(kobj);
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
*/
dprintk("waiting for dropping of refcount\n");
- wait_for_completion(&data->kobj_unregister);
+ wait_for_completion(cmp);
dprintk("wait complete\n");
+ lock_policy_rwsem_write(cpu);
if (cpufreq_driver->exit)
cpufreq_driver->exit(data);
-
unlock_policy_rwsem_write(cpu);
free_cpumask_var(data->related_cpus);
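The reordering above moves sysfs_remove_link(), the final kobject_put()
and the completion wait outside the policy rwsem; presumably this
breaks a deadlock in which a sysfs attribute handler blocks on the same
rwsem while the removal path holds it and waits for the kobject
refcount to drain.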
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int load = 0;
+ unsigned int max_load = 0;
unsigned int freq_target;
struct cpufreq_policy *policy;
continue;
load = 100 * (wall_time - idle_time) / wall_time;
+
+ if (load > max_load)
+ max_load = load;
}
/*
return;
/* Check for frequency increase */
- if (load > dbs_tuners_ins.up_threshold) {
+ if (max_load > dbs_tuners_ins.up_threshold) {
this_dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
* can support the current CPU usage without triggering the up
* policy. To be safe, we focus 10 points under the threshold.
*/
- if (load < (dbs_tuners_ins.down_threshold - 10)) {
+ if (max_load < (dbs_tuners_ins.down_threshold - 10)) {
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
this_dbs_info->requested_freq -= freq_target;
for (try = 0; try < 5; try++) {
new = allocate ? old - bandwidth : old + bandwidth;
if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
- break;
+ return -EBUSY;
data[0] = cpu_to_be32(old);
data[1] = cpu_to_be32(new);
u32 channels_mask, u64 offset, bool allocate, __be32 data[2])
{
__be32 c, all, old;
- int i, retry = 5;
+ int i, ret = -EIO, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
if (!(channels_mask & 1 << i))
continue;
+ ret = -EBUSY;
+
c = cpu_to_be32(1 << (31 - i));
if ((old & c) != (all & c))
continue;
/* 1394-1995 IRM, fall through to retry. */
default:
- if (retry--)
+ if (retry) {
+ retry--;
i--;
+ } else {
+ ret = -EIO;
+ }
}
}
- return -EIO;
+ return ret;
}
static void deallocate_channel(struct fw_card *card, int irm_id,
struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
- int tcode, length, ext_tcode, sel;
+ int tcode, length, ext_tcode, sel, try;
__be32 *payload, lock_old;
u32 lock_arg, lock_data;
reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
reg_write(ohci, OHCI1394_CSRControl, sel);
- if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
- lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
- else
- fw_notify("swap not done yet\n");
+ for (try = 0; try < 20; try++)
+ if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
+ lock_old = cpu_to_be32(reg_read(ohci,
+ OHCI1394_CSRData));
+ fw_fill_response(&response, packet->header,
+ RCODE_COMPLETE,
+ &lock_old, sizeof(lock_old));
+ goto out;
+ }
+
+ fw_error("swap not done (CSR lock timeout)\n");
+ fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
- fw_fill_response(&response, packet->header,
- RCODE_COMPLETE, &lock_old, sizeof(lock_old));
out:
fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
- u64 offset;
- u32 csr;
+ u64 offset, csr;
if (ctx == &ctx->ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
dev_priv->cfb_size = size;
+ dev_priv->compressed_fb = compressed_fb;
+
if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
+ dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mm_put_block(dev_priv->compressed_fb);
+ if (!IS_GM45(dev))
+ drm_mm_put_block(dev_priv->compressed_llb);
+}
+
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ if (I915_HAS_FBC(dev) && i915_powersave)
+ i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
};
const static struct intel_device_info intel_i85x_info = {
- .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+ .cursor_needs_physical = 1,
};
const static struct intel_device_info intel_i865g_info = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
struct intel_device_info {
u8 is_mobile : 1;
u8 is_i8xx : 1;
+ u8 is_i85x : 1;
u8 is_i915g : 1;
u8 is_i9xx : 1;
u8 is_i945gm : 1;
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
+ void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
unsigned int status_gfx_addr;
+ unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct drm_gem_object *seqno_obj;
struct drm_gem_object *pwrctx;
struct resource mch_res;
u8 max_delay;
enum no_fbc_reason no_fbc_reason;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
}
}
+#define PIPE_CONTROL_FLUSH(addr) \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
if (dev_priv->mm.next_gem_seqno == 0)
dev_priv->mm.next_gem_seqno++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+ if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
DRM_DEBUG_DRIVER("%d\n", seqno);
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
return 0;
}
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret)
+ goto err_unref;
+
+ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+ if (dev_priv->seqno_page == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ dev_priv->seqno_obj = obj;
+ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
static int
i915_gem_init_hws(struct drm_device *dev)
{
obj = drm_gem_object_alloc(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
+ goto err_unref;
}
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unpin;
}
+
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ goto err_unpin;
+ }
+
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
if (IS_GEN6(dev)) {
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = dev_priv->seqno_obj;
+ obj_priv = to_intel_bo(obj);
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ dev_priv->seqno_obj = NULL;
+
+ dev_priv->seqno_page = NULL;
}
static void
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
dev_priv->hw_status_page = NULL;
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+
/* Write high address into HWS_PGA when disabling. */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
* reg, so dont bother to check the size */
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
- } else if (IS_I9XX(dev)) {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
- /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
- * instead of 4 (2KB) on 945s.
- */
- if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 20))
+ } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (stride > 8192)
return false;
- } else {
- uint32_t pitch_val = ffs(stride / tile_width) - 1;
- if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
- size > (I830_FENCE_MAX_SIZE_VAL << 19))
- return false;
+ if (IS_GEN3(dev)) {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ return false;
+ } else {
+ if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ return false;
+ }
}
/* 965+ just needs multiples of tile width */
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_USER_INTERRUPT) {
+ if (gt_iir & GT_PIPE_NOTIFY) {
u32 seqno = i915_get_gem_seqno(dev);
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_USER_INTERRUPT;
+ u32 render_mask = GT_PIPE_NOTIFY;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
+ acpi_handle handle;
+ struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+ unsigned long long device_id;
+ acpi_status status;
int i = 0;
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+ return;
+
+ if (acpi_is_video_device(acpi_dev))
+ acpi_video_bus = acpi_dev;
+ else {
+ list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+ if (acpi_is_video_device(acpi_cdev)) {
+ acpi_video_bus = acpi_cdev;
+ break;
+ }
+ }
+ }
+
+ if (!acpi_video_bus) {
+ printk(KERN_WARNING "No ACPI video bus found\n");
+ return;
+ }
+
+ list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+ if (i >= 8) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "More than 8 outputs detected\n");
+ return;
+ }
+ status =
+ acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+ NULL, &device_id);
+ if (ACPI_SUCCESS(status)) {
+ if (!device_id)
+ goto blind_set;
+ opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+ i++;
+ }
+ }
+
+end:
+ /* If fewer than 8 outputs, the list must be null terminated */
+ if (i < 8)
+ opregion->acpi->didl[i] = 0;
+ return;
+
+blind_set:
+ i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
opregion->acpi->didl[i] |= (1<<31) | output_type | i;
i++;
}
-
- /* If fewer than 8 outputs, the list must be null terminated */
- if (i < 8)
- opregion->acpi->didl[i] = 0;
+ goto end;
}
int intel_opregion_init(struct drm_device *dev, int resume)
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
/*
* Fence registers
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
#define I830_FENCE_PITCH_SHIFT 4
#define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
#define I830_FENCE_MAX_PITCH_VAL 6
#define I830_FENCE_MAX_SIZE_VAL (1<<8)
#define DEIER 0x4400c
/* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm;
- else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+ else if (IS_I9XX(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ } else if (IS_I85X(dev)) {
+ dev_priv->display.update_wm = i9xx_update_wm;
+ dev_priv->display.get_fifo_size = i85x_get_fifo_size;
} else {
- if (IS_I85X(dev))
- dev_priv->display.get_fifo_size = i85x_get_fifo_size;
- else if (IS_845G(dev))
+ dev_priv->display.update_wm = i830_update_wm;
+ if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
- dev_priv->display.update_wm = i830_update_wm;
}
}
int err;
list_for_each_entry(s, &data->sensor_list, list) {
+ sysfs_attr_init(&s->input_attr.attr);
err = device_create_file(data->hwmon_dev, &s->input_attr);
if (err)
return err;
+ sysfs_attr_init(&s->label_attr.attr);
err = device_create_file(data->hwmon_dev, &s->label_attr);
if (err)
return err;
+ sysfs_attr_init(&s->limit1_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit1_attr);
if (err)
return err;
+ sysfs_attr_init(&s->limit2_attr.attr);
err = device_create_file(data->hwmon_dev, &s->limit2_attr);
if (err)
return err;
AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted),
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
+ AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
+ AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
{ NULL, }
/* Laptop models without axis info (yet):
* "NC6910" "HP Compaq 6910"
"<%s> I2C Interrupted\n", __func__);
return -EINTR;
}
- if (time_after(jiffies, orig_jiffies + HZ / 1000)) {
+ if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
dev_dbg(&i2c_imx->adapter.dev,
"<%s> I2C bus is busy\n", __func__);
- return -EIO;
+ return -ETIMEDOUT;
}
schedule();
}
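For example, with HZ=100 the old budget of HZ / 1000 truncates to zero
jiffies, so the bus-busy wait could give up after the very first tick;
msecs_to_jiffies(500) provides a real 500 ms window, and -ETIMEDOUT
describes that failure more precisely than -EIO.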
result = i2c_imx_read(i2c_imx, &msgs[i]);
else
result = i2c_imx_write(i2c_imx, &msgs[i]);
+ if (result)
+ goto fail0;
}
fail0:
platform_set_drvdata(pdev, dev);
+ if (cpu_is_omap7xx())
+ dev->reg_shift = 1;
+ else
+ dev->reg_shift = 2;
+
if ((r = omap_i2c_get_clocks(dev)) != 0)
goto err_iounmap;
dev->b_hw = 1; /* Enable hardware fixes */
}
- if (cpu_is_omap7xx())
- dev->reg_shift = 1;
- else
- dev->reg_shift = 2;
-
/* reset ASAP, clearing any IRQs */
omap_i2c_init(dev);
/* We still have something to talk about... */
val = *alg_data->mif.buf++;
+ if (alg_data->mif.len == 1)
+ val |= stop_bit;
+
alg_data->mif.len--;
iowrite32(val, I2C_REG_TX(alg_data));
__func__);
if (alg_data->mif.len == 1) {
+ /* Last byte, do not acknowledge next rcv. */
+ val |= stop_bit;
+
/*
* Enable interrupt RFDAIE (data in Rx fifo),
* and disable DRMIE (need data for Tx)
*/
tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
+ if (tmp > 0x3FF)
+ tmp = 0x3FF;
iowrite32(tmp, I2C_REG_CKH(alg_data));
iowrite32(tmp, I2C_REG_CKL(alg_data));
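The 0x3FF clamp presumably matches the width of the CKH/CKL
clock-divider fields: without it, a divider computed from a fast parent
clock could overflow the register and program an unintended, much
faster bus speed.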
int i = 0;
/* Locate the appropriate clock setting */
- while (i < ARRAY_SIZE(stu300_clktable) &&
+ while (i < ARRAY_SIZE(stu300_clktable) - 1 &&
stu300_clktable[i].rate < clkrate)
i++;
PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+ PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
int previous, int *dd_idx,
struct stripe_head *sh)
{
- long stripe;
- unsigned long chunk_number;
+ sector_t stripe, stripe2;
+ sector_t chunk_number;
unsigned int chunk_offset;
int pd_idx, qd_idx;
int ddf_layout = 0;
*/
chunk_offset = sector_div(r_sector, sectors_per_chunk);
chunk_number = r_sector;
- BUG_ON(r_sector != chunk_number);
/*
* Compute the stripe number
*/
- stripe = chunk_number / data_disks;
-
- /*
- * Compute the data disk and parity disk indexes inside the stripe
- */
- *dd_idx = chunk_number % data_disks;
-
+ stripe = chunk_number;
+ *dd_idx = sector_div(stripe, data_disks);
+ stripe2 = stripe;
/*
* Select the parity disk based on the user selected algorithm.
*/
case 5:
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
- pd_idx = data_disks - stripe % raid_disks;
+ pd_idx = data_disks - sector_div(stripe2, raid_disks);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
break;
case ALGORITHM_RIGHT_ASYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
break;
case ALGORITHM_LEFT_SYMMETRIC:
- pd_idx = data_disks - stripe % raid_disks;
+ pd_idx = data_disks - sector_div(stripe2, raid_disks);
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
break;
case ALGORITHM_RIGHT_SYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
break;
case ALGORITHM_PARITY_0:
switch (algorithm) {
case ALGORITHM_LEFT_ASYMMETRIC:
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
(*dd_idx) += 2; /* D D P Q D */
break;
case ALGORITHM_RIGHT_ASYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
(*dd_idx) += 2; /* D D P Q D */
break;
case ALGORITHM_LEFT_SYMMETRIC:
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + 1) % raid_disks;
*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
break;
case ALGORITHM_RIGHT_SYMMETRIC:
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + 1) % raid_disks;
*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
break;
/* Exactly the same as RIGHT_ASYMMETRIC, but or
* of blocks for computing Q is different.
*/
- pd_idx = stripe % raid_disks;
+ pd_idx = sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
* D D D P Q rather than
* Q D D D P
*/
- pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
+ stripe2 += 1;
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = pd_idx + 1;
if (pd_idx == raid_disks-1) {
(*dd_idx)++; /* Q D D D P */
case ALGORITHM_ROTATING_N_CONTINUE:
/* Same as left_symmetric but Q is before P */
- pd_idx = raid_disks - 1 - (stripe % raid_disks);
+ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
ddf_layout = 1;
case ALGORITHM_LEFT_ASYMMETRIC_6:
/* RAID5 left_asymmetric, with Q on last device */
- pd_idx = data_disks - stripe % (raid_disks-1);
+ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
qd_idx = raid_disks - 1;
break;
case ALGORITHM_RIGHT_ASYMMETRIC_6:
- pd_idx = stripe % (raid_disks-1);
+ pd_idx = sector_div(stripe2, raid_disks-1);
if (*dd_idx >= pd_idx)
(*dd_idx)++;
qd_idx = raid_disks - 1;
break;
case ALGORITHM_LEFT_SYMMETRIC_6:
- pd_idx = data_disks - stripe % (raid_disks-1);
+ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
qd_idx = raid_disks - 1;
break;
case ALGORITHM_RIGHT_SYMMETRIC_6:
- pd_idx = stripe % (raid_disks-1);
+ pd_idx = sector_div(stripe2, raid_disks-1);
*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
qd_idx = raid_disks - 1;
break;
: conf->algorithm;
sector_t stripe;
int chunk_offset;
- int chunk_number, dummy1, dd_idx = i;
+ sector_t chunk_number;
+ int dummy1, dd_idx = i;
sector_t r_sector;
struct stripe_head sh2;
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
- BUG_ON(new_sector != stripe);
if (i == sh->pd_idx)
return 0;
}
chunk_number = stripe * data_disks + i;
- r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
+ r_sector = chunk_number * sectors_per_chunk + chunk_offset;
check = raid5_compute_sector(conf, r_sector,
previous, &dummy1, &sh2);
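These raid5 hunks exist because '%' and '/' on a 64-bit sector_t do not
link on 32-bit kernels (gcc emits a call to a libgcc helper the kernel
does not provide); sector_div() is the portable replacement. A reminder
of its in-place semantics (a sketch, not part of the patch):

	sector_t stripe2 = chunk_number;
	u32 dd_idx;

	/* sector_div(x, y) divides x by y in place and returns the
	 * remainder, yielding quotient and remainder without a
	 * 64-bit '%' */
	dd_idx = sector_div(stripe2, data_disks);
	/* now stripe2 == chunk_number / data_disks and
	 * dd_idx  == chunk_number % data_disks */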
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
+config VMWARE_BALLOON
+ tristate "VMware Balloon Driver"
+ depends on X86
+ help
+ This is the VMware physical memory management driver, which acts
+ like a "balloon" that can be inflated to reclaim physical pages
+ by reserving them in the guest and invalidating them in the
+ monitor, freeing up the underlying machine pages so they can
+ be allocated to other guests. The balloon can also be deflated
+ to allow the guest to use more physical memory.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vmware_balloon.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-y += eeprom/
obj-y += cb710/
+obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o
--- /dev/null
+/*
+ * VMware Balloon driver.
+ *
+ * Copyright (C) 2000-2010, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ */
+
+/*
+ * This is the VMware physical memory management driver for Linux. The driver
+ * acts like a "balloon" that can be inflated to reclaim physical pages by
+ * reserving them in the guest and invalidating them in the monitor,
+ * freeing up the underlying machine pages so they can be allocated to
+ * other guests. The balloon can also be deflated to allow the guest to
+ * use more physical memory. Higher level policies can control the sizes
+ * of balloons in VMs in order to manage physical memory resources.
+ */
+
+//#define DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/vmware.h>
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
+MODULE_VERSION("1.2.1.0-K");
+MODULE_ALIAS("dmi:*:svnVMware*:*");
+MODULE_ALIAS("vmware_vmmemctl");
+MODULE_LICENSE("GPL");
+
+/*
+ * Various constants controlling the rate of inflating/deflating the balloon,
+ * measured in pages.
+ */
+
+/*
+ * Rate of allocating memory when there is no memory pressure
+ * (driver performs non-sleeping allocations).
+ */
+#define VMW_BALLOON_NOSLEEP_ALLOC_MAX 16384U
+
+/*
+ * Rates of memory allocation when the guest experiences memory pressure
+ * (driver performs sleeping allocations).
+ */
+#define VMW_BALLOON_RATE_ALLOC_MIN 512U
+#define VMW_BALLOON_RATE_ALLOC_MAX 2048U
+#define VMW_BALLOON_RATE_ALLOC_INC 16U
+
+/*
+ * Rates for releasing pages while deflating balloon.
+ */
+#define VMW_BALLOON_RATE_FREE_MIN 512U
+#define VMW_BALLOON_RATE_FREE_MAX 16384U
+#define VMW_BALLOON_RATE_FREE_INC 16U
+
+/*
+ * When guest is under memory pressure, use a reduced page allocation
+ * rate for next several cycles.
+ */
+#define VMW_BALLOON_SLOW_CYCLES 4
+
+/*
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
+ * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * __GFP_NOWARN to suppress page allocation failure warnings.
+ */
+#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+
+/*
+ * Use GFP_HIGHUSER when executing in a separate kernel thread
+ * context and allocation can sleep. This is less stressful to
+ * the guest memory system, since it allows the thread to block
+ * while memory is reclaimed, and won't take pages from emergency
+ * low-memory pools.
+ */
+#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
+
+/* Maximum number of page allocations without yielding processor */
+#define VMW_BALLOON_YIELD_THRESHOLD 1024
+
+
+/*
+ * Hypervisor communication port definitions.
+ */
+#define VMW_BALLOON_HV_PORT 0x5670
+#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
+#define VMW_BALLOON_PROTOCOL_VERSION 2
+#define VMW_BALLOON_GUEST_ID 1 /* Linux */
+
+#define VMW_BALLOON_CMD_START 0
+#define VMW_BALLOON_CMD_GET_TARGET 1
+#define VMW_BALLOON_CMD_LOCK 2
+#define VMW_BALLOON_CMD_UNLOCK 3
+#define VMW_BALLOON_CMD_GUEST_ID 4
+
+/* error codes */
+#define VMW_BALLOON_SUCCESS 0
+#define VMW_BALLOON_FAILURE -1
+#define VMW_BALLOON_ERROR_CMD_INVALID 1
+#define VMW_BALLOON_ERROR_PPN_INVALID 2
+#define VMW_BALLOON_ERROR_PPN_LOCKED 3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
+#define VMW_BALLOON_ERROR_PPN_PINNED 5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
+#define VMW_BALLOON_ERROR_RESET 7
+#define VMW_BALLOON_ERROR_BUSY 8
+
+#define VMWARE_BALLOON_CMD(cmd, data, result) \
+({ \
+ unsigned long __stat, __dummy1, __dummy2; \
+ __asm__ __volatile__ ("inl (%%dx)" : \
+ "=a"(__stat), \
+ "=c"(__dummy1), \
+ "=d"(__dummy2), \
+ "=b"(result) : \
+ "0"(VMW_BALLOON_HV_MAGIC), \
+ "1"(VMW_BALLOON_CMD_##cmd), \
+ "2"(VMW_BALLOON_HV_PORT), \
+ "3"(data) : \
+ "memory"); \
+ result &= -1UL; \
+ __stat & -1UL; \
+})
+
+#ifdef CONFIG_DEBUG_FS
+struct vmballoon_stats {
+ unsigned int timer;
+
+ /* allocation statistics */
+ unsigned int alloc;
+ unsigned int alloc_fail;
+ unsigned int sleep_alloc;
+ unsigned int sleep_alloc_fail;
+ unsigned int refused_alloc;
+ unsigned int refused_free;
+ unsigned int free;
+
+ /* monitor operations */
+ unsigned int lock;
+ unsigned int lock_fail;
+ unsigned int unlock;
+ unsigned int unlock_fail;
+ unsigned int target;
+ unsigned int target_fail;
+ unsigned int start;
+ unsigned int start_fail;
+ unsigned int guest_type;
+ unsigned int guest_type_fail;
+};
+
+#define STATS_INC(stat) (stat)++
+#else
+#define STATS_INC(stat)
+#endif
+
+struct vmballoon {
+
+ /* list of reserved physical pages */
+ struct list_head pages;
+
+ /* transient list of non-balloonable pages */
+ struct list_head refused_pages;
+
+ /* balloon size in pages */
+ unsigned int size;
+ unsigned int target;
+
+ /* reset flag */
+ bool reset_required;
+
+ /* adjustment rates (pages per second) */
+ unsigned int rate_alloc;
+ unsigned int rate_free;
+
+ /* slowdown page allocations for next few cycles */
+ unsigned int slow_allocation_cycles;
+
+#ifdef CONFIG_DEBUG_FS
+ /* statistics */
+ struct vmballoon_stats stats;
+
+ /* debugfs file exporting statistics */
+ struct dentry *dbg_entry;
+#endif
+
+ struct sysinfo sysinfo;
+
+ struct delayed_work dwork;
+};
+
+static struct vmballoon balloon;
+static struct workqueue_struct *vmballoon_wq;
+
+/*
+ * Send "start" command to the host, communicating supported version
+ * of the protocol.
+ */
+static bool vmballoon_send_start(struct vmballoon *b)
+{
+ unsigned long status, dummy;
+
+ STATS_INC(b->stats.start);
+
+ status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+ if (status == VMW_BALLOON_SUCCESS)
+ return true;
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.start_fail);
+ return false;
+}
+
+static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
+{
+ switch (status) {
+ case VMW_BALLOON_SUCCESS:
+ return true;
+
+ case VMW_BALLOON_ERROR_RESET:
+ b->reset_required = true;
+ /* fall through */
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * Communicate guest type to the host so that it can adjust ballooning
+ * algorithm to the one most appropriate for the guest. This command
+ * is normally issued after sending "start" command and is part of
+ * standard reset sequence.
+ */
+static bool vmballoon_send_guest_id(struct vmballoon *b)
+{
+ unsigned long status, dummy;
+
+ status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy);
+
+ STATS_INC(b->stats.guest_type);
+
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.guest_type_fail);
+ return false;
+}
+
+/*
+ * Retrieve desired balloon size from the host.
+ */
+static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+{
+ unsigned long status;
+ unsigned long target;
+ unsigned long limit;
+ u32 limit32;
+
+ /*
+ * si_meminfo() is cheap. Moreover, we want to provide dynamic
+ * max balloon size later. So let us call si_meminfo() every
+ * iteration.
+ */
+ si_meminfo(&b->sysinfo);
+ limit = b->sysinfo.totalram;
+
+ /* Ensure limit fits in 32-bits */
+ limit32 = (u32)limit;
+ if (limit != limit32)
+ return false;
+
+ /* update stats */
+ STATS_INC(b->stats.target);
+
+ status = VMWARE_BALLOON_CMD(GET_TARGET, limit, target);
+ if (vmballoon_check_status(b, status)) {
+ *new_target = target;
+ return true;
+ }
+
+ pr_debug("%s - failed, hv returns %ld\n", __func__, status);
+ STATS_INC(b->stats.target_fail);
+ return false;
+}
+
+/*
+ * Notify the host about allocated page so that host can use it without
+ * fear that guest will need it. Host may reject some pages, we need to
+ * check the return value and maybe submit a different page.
+ */
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+{
+ unsigned long status, dummy;
+ u32 pfn32;
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+ return false;
+
+ STATS_INC(b->stats.lock);
+
+ status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.lock_fail);
+ return false;
+}
+
+/*
+ * Notify the host that the guest intends to release the given page back
+ * into the pool of pages available to the guest.
+ */
+static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn)
+{
+ unsigned long status, dummy;
+ u32 pfn32;
+
+ pfn32 = (u32)pfn;
+ if (pfn32 != pfn)
+ return false;
+
+ STATS_INC(b->stats.unlock);
+
+ status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
+ if (vmballoon_check_status(b, status))
+ return true;
+
+ pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ STATS_INC(b->stats.unlock_fail);
+ return false;
+}
+
+/*
+ * Quickly release all pages allocated for the balloon. This function is
+ * called when the host decides to "reset" the balloon for one reason or
+ * another. Unlike a normal "deflate" we do not (shall not) notify the
+ * host of the pages being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+ struct page *page, *next;
+ unsigned int count = 0;
+
+ list_for_each_entry_safe(page, next, &b->pages, lru) {
+ list_del(&page->lru);
+ __free_page(page);
+ STATS_INC(b->stats.free);
+ b->size--;
+
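+ /* yield periodically so popping a large balloon does not hog the CPU */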
+ if (++count >= b->rate_free) {
+ count = 0;
+ cond_resched();
+ }
+ }
+}
+
+/*
+ * Perform standard reset sequence by popping the balloon (in case it
+ * is not empty) and then restarting the protocol. This operation normally
+ * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
+ */
+static void vmballoon_reset(struct vmballoon *b)
+{
+ /* free all pages, skipping monitor unlock */
+ vmballoon_pop(b);
+
+ if (vmballoon_send_start(b)) {
+ b->reset_required = false;
+ if (!vmballoon_send_guest_id(b))
+ pr_err("failed to send guest ID to the host\n");
+ }
+}
+
+/*
+ * Allocate (or reserve) a page for the balloon and notify the host. If
+ * the host refuses the page, put it on the "refused" list and allocate
+ * another one until the host is satisfied. "Refused" pages are released
+ * at the end of the inflation cycle (when we have allocated b->rate_alloc
+ * pages).
+ */
+static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+{
+ struct page *page;
+ gfp_t flags;
+ bool locked = false;
+
+ do {
+ if (!can_sleep)
+ STATS_INC(b->stats.alloc);
+ else
+ STATS_INC(b->stats.sleep_alloc);
+
+ flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
+ page = alloc_page(flags);
+ if (!page) {
+ if (!can_sleep)
+ STATS_INC(b->stats.alloc_fail);
+ else
+ STATS_INC(b->stats.sleep_alloc_fail);
+ return -ENOMEM;
+ }
+
+ /* inform monitor */
+ locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+ if (!locked) {
+ if (b->reset_required) {
+ __free_page(page);
+ return -EIO;
+ }
+
+ /* place on list of non-balloonable pages, retry allocation */
+ list_add(&page->lru, &b->refused_pages);
+ STATS_INC(b->stats.refused_alloc);
+ }
+ } while (!locked);
+
+ /* track allocated page */
+ list_add(&page->lru, &b->pages);
+
+ /* update balloon size */
+ b->size++;
+
+ return 0;
+}
+
+/*
+ * Release the page allocated for the balloon. Note that we first notify
+ * the host so it can make sure the page will be available for the guest
+ * to use, if needed.
+ */
+static int vmballoon_release_page(struct vmballoon *b, struct page *page)
+{
+ if (!vmballoon_send_unlock_page(b, page_to_pfn(page)))
+ return -EIO;
+
+ list_del(&page->lru);
+
+ /* deallocate page */
+ __free_page(page);
+ STATS_INC(b->stats.free);
+
+ /* update balloon size */
+ b->size--;
+
+ return 0;
+}
+
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, &b->refused_pages, lru) {
+ list_del(&page->lru);
+ __free_page(page);
+ STATS_INC(b->stats.refused_free);
+ }
+}
+
+/*
+ * Inflate the balloon towards its target size. Note that we try to limit
+ * the rate of allocation to make sure we are not choking the rest of the
+ * system.
+ */
+static void vmballoon_inflate(struct vmballoon *b)
+{
+ unsigned int goal;
+ unsigned int rate;
+ unsigned int i;
+ unsigned int allocations = 0;
+ int error = 0;
+ bool alloc_can_sleep = false;
+
+ pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+ /*
+ * First try NOSLEEP page allocations to inflate balloon.
+ *
+ * If we do not throttle nosleep allocations, we can drain all
+ * free pages in the guest quickly (if the balloon target is high).
+ * As a side-effect, draining free pages helps to inform (force)
+ * the guest to start swapping if balloon target is not met yet,
+ * which is a desired behavior. However, balloon driver can consume
+ * all available CPU cycles if too many pages are allocated in a
+ * second. Therefore, we throttle nosleep allocations even when
+ * the guest is not under memory pressure. OTOH, if we have already
+ * predicted that the guest is under memory pressure, then we
+ * slow down page allocations considerably.
+ */
+
+ goal = b->target - b->size;
+ /*
+ * Start with the no-sleep allocation rate, which may be higher
+ * than the sleeping allocation rate.
+ */
+ rate = b->slow_allocation_cycles ?
+ b->rate_alloc : VMW_BALLOON_NOSLEEP_ALLOC_MAX;
+
+ pr_debug("%s - goal: %d, no-sleep rate: %d, sleep rate: %d\n",
+ __func__, goal, rate, b->rate_alloc);
+
+ for (i = 0; i < goal; i++) {
+
+ error = vmballoon_reserve_page(b, alloc_can_sleep);
+ if (error) {
+ if (error != -ENOMEM) {
+ /*
+ * Not a page allocation failure, stop this
+ * cycle. Maybe we'll get new target from
+ * the host soon.
+ */
+ break;
+ }
+
+ if (alloc_can_sleep) {
+ /*
+ * CANSLEEP page allocation failed, so guest
+ * is under severe memory pressure. Quickly
+ * decrease allocation rate.
+ */
+ b->rate_alloc = max(b->rate_alloc / 2,
+ VMW_BALLOON_RATE_ALLOC_MIN);
+ break;
+ }
+
+ /*
+ * NOSLEEP page allocation failed, so the guest is
+ * under memory pressure. Let us slow down page
+ * allocations for the next few cycles so that the guest
+ * gets out of memory pressure. Also, if we already
+ * allocated b->rate_alloc pages, let's pause,
+ * otherwise switch to sleeping allocations.
+ */
+ b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;
+
+ if (i >= b->rate_alloc)
+ break;
+
+ alloc_can_sleep = true;
+ /* Lower rate for sleeping allocations. */
+ rate = b->rate_alloc;
+ }
+
+ if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
+ cond_resched();
+ allocations = 0;
+ }
+
+ if (i >= rate) {
+ /* We allocated enough pages, let's take a break. */
+ break;
+ }
+ }
+
+ /*
+ * We reached our goal without failures so try increasing
+ * allocation rate.
+ */
+ if (error == 0 && i >= b->rate_alloc) {
+ unsigned int mult = i / b->rate_alloc;
+
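+ /* scale the increase by how many multiples of the old rate we allocated */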
+ b->rate_alloc =
+ min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
+ VMW_BALLOON_RATE_ALLOC_MAX);
+ }
+
+ vmballoon_release_refused_pages(b);
+}
+
+/*
+ * Decrease the size of the balloon allowing guest to use more memory.
+ */
+static void vmballoon_deflate(struct vmballoon *b)
+{
+ struct page *page, *next;
+ unsigned int i = 0;
+ unsigned int goal;
+ int error;
+
+ pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+
+ /* limit deallocation rate */
+ goal = min(b->size - b->target, b->rate_free);
+
+ pr_debug("%s - goal: %d, rate: %d\n", __func__, goal, b->rate_free);
+
+ /* free pages to reach target */
+ list_for_each_entry_safe(page, next, &b->pages, lru) {
+ error = vmballoon_release_page(b, page);
+ if (error) {
+ /* quickly decrease rate in case of error */
+ b->rate_free = max(b->rate_free / 2,
+ VMW_BALLOON_RATE_FREE_MIN);
+ return;
+ }
+
+ if (++i >= goal)
+ break;
+ }
+
+ /* slowly increase rate if there were no errors */
+ b->rate_free = min(b->rate_free + VMW_BALLOON_RATE_FREE_INC,
+ VMW_BALLOON_RATE_FREE_MAX);
+}
+
+/*
+ * Balloon work function: reset protocol, if needed, get the new size and
+ * adjust balloon as needed. Repeat in 1 sec.
+ */
+static void vmballoon_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
+ unsigned int target;
+
+ STATS_INC(b->stats.timer);
+
+ if (b->reset_required)
+ vmballoon_reset(b);
+
+ if (b->slow_allocation_cycles > 0)
+ b->slow_allocation_cycles--;
+
+ if (vmballoon_send_get_target(b, &target)) {
+ /* update target, adjust size */
+ b->target = target;
+
+ if (b->size < target)
+ vmballoon_inflate(b);
+ else if (b->size > target)
+ vmballoon_deflate(b);
+ }
+
+ queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
+}
+
+/*
+ * DEBUGFS Interface
+ */
+#ifdef CONFIG_DEBUG_FS
+
+static int vmballoon_debug_show(struct seq_file *f, void *offset)
+{
+ struct vmballoon *b = f->private;
+ struct vmballoon_stats *stats = &b->stats;
+
+ /* format size info */
+ seq_printf(f,
+ "target: %8d pages\n"
+ "current: %8d pages\n",
+ b->target, b->size);
+
+ /* format rate info */
+ seq_printf(f,
+ "rateNoSleepAlloc: %8d pages/sec\n"
+ "rateSleepAlloc: %8d pages/sec\n"
+ "rateFree: %8d pages/sec\n",
+ VMW_BALLOON_NOSLEEP_ALLOC_MAX,
+ b->rate_alloc, b->rate_free);
+
+ seq_printf(f,
+ "\n"
+ "timer: %8u\n"
+ "start: %8u (%4u failed)\n"
+ "guestType: %8u (%4u failed)\n"
+ "lock: %8u (%4u failed)\n"
+ "unlock: %8u (%4u failed)\n"
+ "target: %8u (%4u failed)\n"
+ "primNoSleepAlloc: %8u (%4u failed)\n"
+ "primCanSleepAlloc: %8u (%4u failed)\n"
+ "primFree: %8u\n"
+ "errAlloc: %8u\n"
+ "errFree: %8u\n",
+ stats->timer,
+ stats->start, stats->start_fail,
+ stats->guest_type, stats->guest_type_fail,
+ stats->lock, stats->lock_fail,
+ stats->unlock, stats->unlock_fail,
+ stats->target, stats->target_fail,
+ stats->alloc, stats->alloc_fail,
+ stats->sleep_alloc, stats->sleep_alloc_fail,
+ stats->free,
+ stats->refused_alloc, stats->refused_free);
+
+ return 0;
+}
+
+static int vmballoon_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vmballoon_debug_show, inode->i_private);
+}
+
+static const struct file_operations vmballoon_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = vmballoon_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init vmballoon_debugfs_init(struct vmballoon *b)
+{
+ int error;
+
+ b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
+ &vmballoon_debug_fops);
+ if (IS_ERR(b->dbg_entry)) {
+ error = PTR_ERR(b->dbg_entry);
+ pr_err("failed to create debugfs entry, error: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
+{
+ debugfs_remove(b->dbg_entry);
+}
+
+#else
+
+static inline int vmballoon_debugfs_init(struct vmballoon *b)
+{
+ return 0;
+}
+
+static inline void vmballoon_debugfs_exit(struct vmballoon *b)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __init vmballoon_init(void)
+{
+ int error;
+
+ /*
+ * Check if we are running on VMware's hypervisor and bail out
+ * if we are not.
+ */
+ if (!vmware_platform())
+ return -ENODEV;
+
+ vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+ if (!vmballoon_wq) {
+ pr_err("failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&balloon.pages);
+ INIT_LIST_HEAD(&balloon.refused_pages);
+
+ /* initialize rates */
+ balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;
+ balloon.rate_free = VMW_BALLOON_RATE_FREE_MAX;
+
+ INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
+
+ /*
+ * Start balloon.
+ */
+ if (!vmballoon_send_start(&balloon)) {
+ pr_err("failed to send start command to the host\n");
+ error = -EIO;
+ goto fail;
+ }
+
+ if (!vmballoon_send_guest_id(&balloon)) {
+ pr_err("failed to send guest ID to the host\n");
+ error = -EIO;
+ goto fail;
+ }
+
+ error = vmballoon_debugfs_init(&balloon);
+ if (error)
+ goto fail;
+
+ queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
+
+ return 0;
+
+fail:
+ destroy_workqueue(vmballoon_wq);
+ return error;
+}
+module_init(vmballoon_init);
+
+static void __exit vmballoon_exit(void)
+{
+ cancel_delayed_work_sync(&balloon.dwork);
+ destroy_workqueue(vmballoon_wq);
+
+ vmballoon_debugfs_exit(&balloon);
+
+ /*
+ * Deallocate all reserved memory, and reset the connection with the
+ * monitor. Reset the connection before deallocating memory to avoid
+ * spurious resets caused by the guest touching the deallocated pages.
+ */
+ vmballoon_send_start(&balloon);
+ vmballoon_pop(&balloon);
+}
+module_exit(vmballoon_exit);
}
buf64 = (uint64_t *)buf;
while (i < len/8) {
- uint64_t x;
+ /*
+ * Since GCC has no proper constraint (PR 43518),
+ * force the variable x into the r2/r3 register pair, as the
+ * ldrd instruction requires its first register to be even.
+ */
+ register uint64_t x asm ("r2");
+
asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
buf64[i++] = x;
}
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
- return state > PCI_D0 ?
+ return state >= PCI_D0 ?
pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
*/
return 0;
- /* Check if we're already there */
- if (dev->current_state == state)
- return 0;
-
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
/* Assert Secondary Bus Reset */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_CB_BRIDGE_CTL_CB_RESET;
+ p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
+ /*
+ * we should keep the hot reset asserted for 2 ms to allow it time to
+ * propagate to all downstream ports
+ */
+ msleep(2);
+
/* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_CB_BRIDGE_CTL_CB_RESET;
+ p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
/*
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
- if (!sz)
- goto fail; /* BAR not implemented */
-
/*
* All bits set in sz means the device isn't working properly.
- * If it's a memory BAR or a ROM, bit 0 must be clear; if it's
- * an io BAR, bit 1 must be clear.
+ * If the BAR isn't implemented, all bits must be 0. If it's a
+ * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
+ * 1 must be clear.
*/
- if (sz == 0xffffffff) {
- dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n",
- pos, sz);
+ if (!sz || sz == 0xffffffff)
goto fail;
- }
/*
* I don't know how l can have all bits set. Copied from old code.
pos, res);
}
} else {
- u32 size = pci_size(l, sz, mask);
+ sz = pci_size(l, sz, mask);
- if (!size) {
- dev_err(&dev->dev, "reg %x: invalid size "
- "(l %#x sz %#x mask %#x); broken device?",
- pos, l, sz, mask);
+ if (!sz)
goto fail;
- }
res->start = l;
- res->end = l + size;
+ res->end = l + sz;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
depends on ACPI_WMI
depends on INPUT
depends on EXPERIMENTAL
+ select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
module_param(wapf, uint, 0644);
MODULE_PARM_DESC(wapf, "WAPF value");
-static uint wlan_status = 1;
-static uint bluetooth_status = 1;
+static int wlan_status = 1;
+static int bluetooth_status = 1;
-module_param(wlan_status, uint, 0644);
+module_param(wlan_status, int, 0644);
MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
-module_param(bluetooth_status, uint, 0644);
+module_param(bluetooth_status, int, 0644);
MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
"(0 = disabled, 1 = enabled, -1 = don't do anything). "
"default is 1");
if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
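+		/* unknown event: log it and free the event object before bailing */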
printk(KERN_INFO "dell-wmi: Received unknown WMI event"
" (0x%x)\n", buffer_entry[1]);
+ kfree(obj);
return;
}
key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
/* Don't report brightness notifications that will also
* come via ACPI */
- return;
+ ;
} else {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
struct backlight_device *backlight_device;
struct input_dev *inputdev;
- struct key_entry *keymap;
struct rfkill *wlan_rfkill;
struct rfkill *bluetooth_rfkill;
static void eeepc_input_exit(struct eeepc_laptop *eeepc)
{
if (eeepc->inputdev) {
+ sparse_keymap_free(eeepc->inputdev);
input_unregister_device(eeepc->inputdev);
- kfree(eeepc->keymap);
}
}
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#define EEEPC_WMI_FILE "eeepc-wmi"
+
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
MODULE_LICENSE("GPL");
#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
+#define EEEPC_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
+MODULE_ALIAS("wmi:"EEEPC_WMI_MGMT_GUID);
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define EEEPC_WMI_METHODID_DEVS 0x53564544
+#define EEEPC_WMI_METHODID_DSTS 0x53544344
+
+#define EEEPC_WMI_DEVID_BACKLIGHT 0x00050012
+
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x5d, { KEY_WLAN } },
{ KE_END, 0},
};
-static struct input_dev *eeepc_wmi_input_dev;
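+/* argument block passed to the DEVS WMI method (see eeepc_wmi_set_devstate) */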
+struct bios_args {
+ u32 dev_id;
+ u32 ctrl_param;
+};
+
+struct eeepc_wmi {
+ struct input_dev *inputdev;
+ struct backlight_device *backlight_device;
+};
+
+static struct platform_device *platform_device;
+
+static int eeepc_wmi_input_init(struct eeepc_wmi *eeepc)
+{
+ int err;
+
+ eeepc->inputdev = input_allocate_device();
+ if (!eeepc->inputdev)
+ return -ENOMEM;
+
+ eeepc->inputdev->name = "Eee PC WMI hotkeys";
+ eeepc->inputdev->phys = EEEPC_WMI_FILE "/input0";
+ eeepc->inputdev->id.bustype = BUS_HOST;
+ eeepc->inputdev->dev.parent = &platform_device->dev;
+
+ err = sparse_keymap_setup(eeepc->inputdev, eeepc_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(eeepc->inputdev);
+ if (err)
+ goto err_free_keymap;
+
+ return 0;
+
+err_free_keymap:
+ sparse_keymap_free(eeepc->inputdev);
+err_free_dev:
+ input_free_device(eeepc->inputdev);
+ return err;
+}
+
+static void eeepc_wmi_input_exit(struct eeepc_wmi *eeepc)
+{
+ if (eeepc->inputdev) {
+ sparse_keymap_free(eeepc->inputdev);
+ input_unregister_device(eeepc->inputdev);
+ }
+
+ eeepc->inputdev = NULL;
+}
+
+static acpi_status eeepc_wmi_get_devstate(u32 dev_id, u32 *ctrl_param)
+{
+ struct acpi_buffer input = { (acpi_size)sizeof(u32), &dev_id };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ u32 tmp;
+
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
+ 1, EEEPC_WMI_METHODID_DSTS, &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *)output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = (u32)obj->integer.value;
+ else
+ tmp = 0;
+
+ if (ctrl_param)
+ *ctrl_param = tmp;
+
+ kfree(obj);
+
+ return status;
+}
+
+static acpi_status eeepc_wmi_set_devstate(u32 dev_id, u32 ctrl_param)
+{
+ struct bios_args args = {
+ .dev_id = dev_id,
+ .ctrl_param = ctrl_param,
+ };
+ struct acpi_buffer input = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ status = wmi_evaluate_method(EEEPC_WMI_MGMT_GUID,
+ 1, EEEPC_WMI_METHODID_DEVS, &input, NULL);
+
+ return status;
+}
+
+static int read_brightness(struct backlight_device *bd)
+{
+ u32 ctrl_param;
+ acpi_status status;
+
+ status = eeepc_wmi_get_devstate(EEEPC_WMI_DEVID_BACKLIGHT, &ctrl_param);
+
+ if (ACPI_FAILURE(status))
+ return -1;
+ else
+ return ctrl_param & 0xFF;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ u32 ctrl_param;
+ acpi_status status;
+
+ ctrl_param = bd->props.brightness;
+
+ status = eeepc_wmi_set_devstate(EEEPC_WMI_DEVID_BACKLIGHT, ctrl_param);
+
+ if (ACPI_FAILURE(status))
+ return -1;
+ else
+ return 0;
+}
+
+static const struct backlight_ops eeepc_wmi_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int eeepc_wmi_backlight_notify(struct eeepc_wmi *eeepc, int code)
+{
+ struct backlight_device *bd = eeepc->backlight_device;
+ int old = bd->props.brightness;
+ int new = old;	/* default to no change if the code is out of range */
+
+ if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+ new = code - NOTIFY_BRNUP_MIN + 1;
+ else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+ new = code - NOTIFY_BRNDOWN_MIN;
+
+ bd->props.brightness = new;
+ backlight_update_status(bd);
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int eeepc_wmi_backlight_init(struct eeepc_wmi *eeepc)
+{
+ struct backlight_device *bd;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
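+ /* firmware exposes 16 brightness levels, 0-15 */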
+ props.max_brightness = 15;
+ bd = backlight_device_register(EEEPC_WMI_FILE,
+ &platform_device->dev, eeepc,
+ &eeepc_wmi_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register backlight device\n");
+ return PTR_ERR(bd);
+ }
+
+ eeepc->backlight_device = bd;
+
+ bd->props.brightness = read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static void eeepc_wmi_backlight_exit(struct eeepc_wmi *eeepc)
+{
+ if (eeepc->backlight_device)
+ backlight_device_unregister(eeepc->backlight_device);
+
+ eeepc->backlight_device = NULL;
+}
static void eeepc_wmi_notify(u32 value, void *context)
{
+ struct eeepc_wmi *eeepc = context;
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int code;
+ int orig_code;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
- pr_err("EEEPC WMI: bad event status 0x%x\n", status);
+ pr_err("bad event status 0x%x\n", status);
return;
}
if (obj && obj->type == ACPI_TYPE_INTEGER) {
code = obj->integer.value;
+ orig_code = code;
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = NOTIFY_BRNUP_MIN;
- else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+ else if (code >= NOTIFY_BRNDOWN_MIN &&
+ code <= NOTIFY_BRNDOWN_MAX)
code = NOTIFY_BRNDOWN_MIN;
- if (!sparse_keymap_report_event(eeepc_wmi_input_dev,
+ if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
+ if (!acpi_video_backlight_support())
+ eeepc_wmi_backlight_notify(eeepc, orig_code);
+ }
+
+ if (!sparse_keymap_report_event(eeepc->inputdev,
code, 1, true))
- pr_info("EEEPC WMI: Unknown key %x pressed\n", code);
+ pr_info("Unknown key %x pressed\n", code);
}
kfree(obj);
}
-static int eeepc_wmi_input_setup(void)
+static int __devinit eeepc_wmi_platform_probe(struct platform_device *device)
{
+ struct eeepc_wmi *eeepc;
int err;
+ acpi_status status;
- eeepc_wmi_input_dev = input_allocate_device();
- if (!eeepc_wmi_input_dev)
- return -ENOMEM;
-
- eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys";
- eeepc_wmi_input_dev->phys = "wmi/input0";
- eeepc_wmi_input_dev->id.bustype = BUS_HOST;
+ eeepc = platform_get_drvdata(device);
- err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL);
+ err = eeepc_wmi_input_init(eeepc);
if (err)
- goto err_free_dev;
+ goto error_input;
- err = input_register_device(eeepc_wmi_input_dev);
- if (err)
- goto err_free_keymap;
+ if (!acpi_video_backlight_support()) {
+ err = eeepc_wmi_backlight_init(eeepc);
+ if (err)
+ goto error_backlight;
+ } else {
+ pr_info("Backlight controlled by ACPI video driver\n");
+ }
+
+ status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
+ eeepc_wmi_notify, eeepc);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Unable to register notify handler - %d\n",
+ status);
+ err = -ENODEV;
+ goto error_wmi;
+ }
return 0;
-err_free_keymap:
- sparse_keymap_free(eeepc_wmi_input_dev);
-err_free_dev:
- input_free_device(eeepc_wmi_input_dev);
+error_wmi:
+ eeepc_wmi_backlight_exit(eeepc);
+error_backlight:
+ eeepc_wmi_input_exit(eeepc);
+error_input:
return err;
}
+static int __devexit eeepc_wmi_platform_remove(struct platform_device *device)
+{
+ struct eeepc_wmi *eeepc;
+
+ eeepc = platform_get_drvdata(device);
+ wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
+ eeepc_wmi_backlight_exit(eeepc);
+ eeepc_wmi_input_exit(eeepc);
+
+ return 0;
+}
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = EEEPC_WMI_FILE,
+ .owner = THIS_MODULE,
+ },
+ .probe = eeepc_wmi_platform_probe,
+ .remove = __devexit_p(eeepc_wmi_platform_remove),
+};
+
static int __init eeepc_wmi_init(void)
{
+ struct eeepc_wmi *eeepc;
int err;
- acpi_status status;
- if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) {
- pr_warning("EEEPC WMI: No known WMI GUID found\n");
+ if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID) ||
+ !wmi_has_guid(EEEPC_WMI_MGMT_GUID)) {
+ pr_warning("No known WMI GUID found\n");
return -ENODEV;
}
- err = eeepc_wmi_input_setup();
- if (err)
- return err;
+ eeepc = kzalloc(sizeof(struct eeepc_wmi), GFP_KERNEL);
+ if (!eeepc)
+ return -ENOMEM;
- status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
- eeepc_wmi_notify, NULL);
- if (ACPI_FAILURE(status)) {
- sparse_keymap_free(eeepc_wmi_input_dev);
- input_unregister_device(eeepc_wmi_input_dev);
- pr_err("EEEPC WMI: Unable to register notify handler - %d\n",
- status);
- return -ENODEV;
+ platform_device = platform_device_alloc(EEEPC_WMI_FILE, -1);
+ if (!platform_device) {
+ pr_warning("Unable to allocate platform device\n");
+ err = -ENOMEM;
+ goto fail_platform;
+ }
+
+ err = platform_device_add(platform_device);
+ if (err) {
+ pr_warning("Unable to add platform device\n");
+ goto put_dev;
+ }
+
+ platform_set_drvdata(platform_device, eeepc);
+
+ err = platform_driver_register(&platform_driver);
+ if (err) {
+ pr_warning("Unable to register platform driver\n");
+ goto del_dev;
}
return 0;
+
+del_dev:
+ platform_device_del(platform_device);
+put_dev:
+ platform_device_put(platform_device);
+fail_platform:
+ kfree(eeepc);
+
+ return err;
}
static void __exit eeepc_wmi_exit(void)
{
- wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
- sparse_keymap_free(eeepc_wmi_input_dev);
- input_unregister_device(eeepc_wmi_input_dev);
+ struct eeepc_wmi *eeepc;
+
+ eeepc = platform_get_drvdata(platform_device);
+ platform_driver_unregister(&platform_driver);
+ platform_device_unregister(platform_device);
+ kfree(eeepc);
}
module_init(eeepc_wmi_init);
dev_get_platdata(&pdev->dev);
int i;
+ platform_set_drvdata(pdev, NULL);
+
for (i = 0; i < pdata->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
+ kfree(priv);
return 0;
}
/* Process requests that may be recovered */
if (cqr->status == DASD_CQR_NEED_ERP) {
erp_fn = base->discipline->erp_action(cqr);
- erp_fn(cqr);
+ if (IS_ERR(erp_fn(cqr)))
+ continue;
goto restart;
}
cqr->retries);
dasd_block_set_timer(device->block, (HZ << 3));
}
- return cqr;
+ return erp;
}
ccw = cqr->cpaddr;
/* add erp and initialize with default TIC */
erp = dasd_3990_erp_add_erp(cqr);
+ if (IS_ERR(erp))
+ return erp;
+
/* inspect sense, determine specific ERP if possible */
if (erp != cqr) {
if (erp == NULL) {
/* no matching erp found - set up erp */
erp = dasd_3990_erp_additional_erp(cqr);
+ if (IS_ERR(erp))
+ return erp;
} else {
/* matching erp found - set all leading erp's to DONE */
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
- if (rc) {
- free_page((unsigned long) ipl_block);
- return rc;
- }
- if (csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) ipl_block);
#include "chsc.h"
static void *sei_page;
+static DEFINE_SPINLOCK(sda_lock);
/**
* chsc_error_from_response() - convert a chsc response to an error
kfree(sei_page);
}
-int __init
-chsc_enable_facility(int operation_code)
+int chsc_enable_facility(int operation_code)
{
int ret;
- struct {
+ static struct {
struct chsc_header request;
u8 reserved1:4;
u8 format:4;
u32 reserved5:4;
u32 format2:4;
u32 reserved6:24;
- } __attribute__ ((packed)) *sda_area;
+ } __attribute__ ((packed, aligned(4096))) sda_area;
- sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!sda_area)
- return -ENOMEM;
- sda_area->request.length = 0x0400;
- sda_area->request.code = 0x0031;
- sda_area->operation_code = operation_code;
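+ /*
+ * sda_area is static (protected by sda_lock) so that this function can
+ * also be called outside the init path, e.g. from
+ * channel_subsystem_reinit(), without having to allocate memory.
+ */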
+ spin_lock(&sda_lock);
+ memset(&sda_area, 0, sizeof(sda_area));
+ sda_area.request.length = 0x0400;
+ sda_area.request.code = 0x0031;
+ sda_area.operation_code = operation_code;
- ret = chsc(sda_area);
+ ret = chsc(&sda_area);
if (ret > 0) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
- switch (sda_area->response.code) {
+ switch (sda_area.response.code) {
case 0x0101:
ret = -EOPNOTSUPP;
break;
default:
- ret = chsc_error_from_response(sda_area->response.code);
+ ret = chsc_error_from_response(sda_area.response.code);
}
if (ret != 0)
CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
- operation_code, sda_area->response.code);
+ operation_code, sda_area.response.code);
out:
- free_page((unsigned long)sda_area);
+ spin_unlock(&sda_lock);
return ret;
}
* since we don't have a way to clear the subchannel and
* cannot disable it with a request running.
*/
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
if (!cc && scsw_stctl(&schib.scsw))
return -EAGAIN;
return 0;
struct schib schib;
int ccode, retry, ret = 0;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
for (retry = 0; retry < 5; retry++) {
return ccode;
switch (ccode) {
case 0: /* successful */
- if (stsch(sch->schid, &schib) ||
+ if (stsch_err(sch->schid, &schib) ||
!css_sch_is_valid(&schib))
return -ENODEV;
if (cio_check_config(sch, &schib)) {
{
struct schib schib;
- if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
+ if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
memcpy(&sch->schib, &schib, sizeof(schib));
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
- if (stsch(schid, &console_subchannel.schib) != 0 ||
+ if (stsch_err(schid, &console_subchannel.schib) != 0 ||
(console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
!console_subchannel.schib.pmcw.dnv)
return -1;
cc = 0;
for (retry=0;retry<3;retry++) {
schib->pmcw.ena = 0;
- cc = msch(schid, schib);
+ cc = msch_err(schid, schib);
if (cc)
return (cc==3?-ENODEV:-EBUSY);
- if (stsch(schid, schib) || !css_sch_is_valid(schib))
+ if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
return -ENODEV;
if (!schib->pmcw.ena)
return 0;
pgm_check_occured = 0;
s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
- rc = stsch(schid, addr);
+ rc = stsch_err(schid, addr);
s390_base_pgm_handler_fn = NULL;
/* The program check handler could have changed pgm_check_occured. */
/* No default clear strategy */
break;
}
- stsch(schid, &schib);
+ stsch_err(schid, &schib);
__disable_subchannel_easy(schid, &schib);
}
out:
schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
if (!schid.one)
return -ENODEV;
- if (stsch(schid, &schib))
+ if (stsch_err(schid, &schib))
return -ENODEV;
if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
return -ENODEV;
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
- switch (ret) {
- case 0: /* Success. */
- max_ssid = __MAX_SSID;
- break;
- case -ENOMEM:
- goto out;
- default:
+ if (ret)
max_ssid = 0;
- }
+ else /* Success. */
+ max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
}
subsys_initcall_sync(channel_subsystem_init_sync);
+void channel_subsystem_reinit(void)
+{
+ chsc_enable_facility(CHSC_SDA_OC_MSS);
+}
+
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
- cc = stsch(sch->schid, &schib);
+ cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
"device information:\n", get_clock());
blktrc.inb_usage = req->qdio_req.qdio_inb_usage;
blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
- if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+ !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
blktrc.flags |= ZFCP_BLK_LAT_VALID;
blktrc.channel_lat = lat_in->channel_lat * ticks;
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
- zfcp_fsf_req_trace(req, scpnt);
-
skip_fsfstatus:
+ zfcp_fsf_req_trace(req, scpnt);
zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req);
scpnt->host_scribble = NULL;
SE_DEBUG(DBG_LVL_1,
"Failed to allocate memory for"
"mgmt_invalidate_icds \n");
+ spin_unlock(&ctrl->mbox_lock);
return -1;
}
nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
u32 num_ccell;
int ofld_conns_active;
+ wait_queue_head_t eh_wait;
int max_active_conns;
struct iscsi_cid_queue cid_que;
spinlock_t lock; /* protects hba structure access */
struct mutex net_dev_lock;/* sync net device access */
+ int hba_shutdown_tmo;
/*
* PCI related info.
*/
struct bnx2i_hba *hba = handle;
/* check if cleanup happened in GOING_DOWN context */
- clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
&hba->adapter_state))
iscsi_host_for_each_session(hba->shost,
bnx2i_drop_session);
+
+ /* Wait for all endpoints to be torn down; the chip will be reset once
+ * control returns to the network driver, so all connection resources
+ * must be cleaned up and released before returning from this routine.
+ */
+ wait_event_interruptible_timeout(hba->eh_wait,
+ (hba->ofld_conns_active == 0),
+ hba->hba_shutdown_tmo);
+ /* This flag should be cleared last so that ep_disconnect() gracefully
+ * cleans up the connection context.
+ */
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
}
/**
spin_lock_init(&hba->lock);
mutex_init(&hba->net_dev_lock);
+ init_waitqueue_head(&hba->eh_wait);
+ if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+ hba->hba_shutdown_tmo = 240 * HZ;
+ else /* 5706/5708/5709 */
+ hba->hba_shutdown_tmo = 30 * HZ;
if (iscsi_host_add(shost, &hba->pcidev->dev))
goto free_dump_mem;
*/
hba = bnx2i_check_route(dst_addr);
- if (!hba) {
- rc = -ENOMEM;
+ if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) {
+ rc = -EINVAL;
goto check_busy;
}
(bnx2i_ep->state ==
EP_STATE_CONNECT_COMPL)),
msecs_to_jiffies(timeout_ms));
- if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+ if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
rc = -1;
if (rc > 0)
if (!hba->ofld_conns_active)
bnx2i_unreg_dev_all();
+
+ wake_up_interruptible(&hba->eh_wait);
}
static int adpt_detect(struct scsi_host_template* sht)
{
struct pci_dev *pDev = NULL;
- adpt_hba* pHba;
+ adpt_hba *pHba;
+ adpt_hba *next;
PINFO("Detecting Adaptec I2O RAID controllers...\n");
}
/* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
// Activate does get status , init outbound, and get hrt
if (adpt_i2o_activate_hba(pHba) < 0) {
adpt_i2o_delete_hba(pHba);
PDEBUG("HBA's in OPERATIONAL state\n");
printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
if (adpt_i2o_lct_get(pHba) < 0){
adpt_i2o_delete_hba(pHba);
adpt_sysfs_class = NULL;
}
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
+ for (pHba = hba_chain; pHba; pHba = next) {
+ next = pHba->next;
if (adpt_scsi_host_alloc(pHba, sht) < 0){
adpt_i2o_delete_hba(pHba);
continue;
}
}
pci_dev_put(pHba->pDev);
- kfree(pHba);
-
if (adpt_sysfs_class)
device_destroy(adpt_sysfs_class,
MKDEV(DPTI_I2O_MAJOR, pHba->unit));
+ kfree(pHba);
if(hba_count <= 0){
unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
srp_cmd->buf_fmt = fmt;
}
-static void unmap_sg_list(int num_entries,
- struct device *dev,
- struct srp_direct_buf *md)
-{
- int i;
-
- for (i = 0; i < num_entries; ++i)
- dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL);
-}
-
/**
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped
if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
return;
- else if (out_fmt == SRP_DATA_DESC_DIRECT ||
- in_fmt == SRP_DATA_DESC_DIRECT) {
- struct srp_direct_buf *data =
- (struct srp_direct_buf *) cmd->add_data;
- dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL);
- } else {
- struct srp_indirect_buf *indirect =
- (struct srp_indirect_buf *) cmd->add_data;
- int num_mapped = indirect->table_desc.len /
- sizeof(struct srp_direct_buf);
- if (num_mapped <= MAX_INDIRECT_BUFS) {
- unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]);
- return;
- }
-
- unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
- }
+ if (evt_struct->cmnd)
+ scsi_dma_unmap(evt_struct->cmnd);
}
static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
- if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) {
+ if (sock->sk->sk_sleep) {
sock->sk->sk_err = EIO;
wake_up_interruptible(sock->sk->sk_sleep);
}
dd_data = cmdiocbq->context1;
/* normal completion and timeout crossed paths, already done */
if (!dd_data) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
dd_data = cmdiocbq->context1;
/* normal completion and timeout crossed paths, already done */
if (!dd_data) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
return;
}
return 0;
done:
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
kfree(sp->ctx);
if (conn_err_detail)
*conn_err_detail = mbox_sts[5];
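+	/* the TCP source port is in the upper 16 bits of mbox_sts[6] */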
if (tcp_source_port_num)
- *tcp_source_port_num = (uint16_t) mbox_sts[6] >> 16;
+ *tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
if (connection_id)
*connection_id = (uint16_t) mbox_sts[6] & 0x00FF;
status = QLA_SUCCESS;
{
Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
- spin_unlock_irq(SCpnt->device->host->host_lock);
+ spin_lock_irq(SCpnt->device->host->host_lock);
if (wd7000_adapter_reset(host) < 0) {
spin_unlock_irq(SCpnt->device->host->host_lock);
}
spin_lock_irqsave(&port->lock, flags);
+ uart_update_timeout(port, termios->c_cflag, baud);
writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
static void mcf_config_port(struct uart_port *port, int flags)
{
port->type = PORT_MCF;
+ port->fifosize = MCFUART_TXFIFOSIZE;
/* Clear mask, so no surprise interrupts. */
writeb(0, port->membase + MCFUART_UIMR);
/*
* Define the basic serial functions we support.
*/
-static struct uart_ops mcf_uart_ops = {
+static const struct uart_ops mcf_uart_ops = {
.tx_empty = mcf_tx_empty,
.get_mctrl = mcf_get_mctrl,
.set_mctrl = mcf_set_mctrl,
.verify_port = mcf_verify_port,
};
-static struct mcf_uart mcf_ports[3];
+static struct mcf_uart mcf_ports[4];
#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
extern void printques(int);
-#ifdef MODULE
#include <linux/module.h>
#include <linux/interrupt.h>
-
-
-MODULE_LICENSE("GPL");
-
-#endif
-
-#ifndef CONFIG_PCI
-#error "DT3155 : Kernel PCI support not enabled (DT3155 drive requires PCI)"
-#endif
-
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/poll.h>
#include "dt3155_io.h"
#include "allocator.h"
+
+MODULE_LICENSE("GPL");
+
/* Error variable. Zero means no error. */
int dt3155_errno = 0;
intf->condition = USB_INTERFACE_BINDING;
- /* Bound interfaces are initially active. They are
+ /* Probed interfaces are initially active. They are
* runtime-PM-enabled only if the driver has autosuspend support.
* They are sensitive to their children's power states.
*/
iface->condition = USB_INTERFACE_BOUND;
- /* Bound interfaces are initially active. They are
+ /* Claimed interfaces are initially inactive (suspended). They are
* runtime-PM-enabled only if the driver has autosuspend support.
* They are sensitive to their children's power states.
*/
- pm_runtime_set_active(dev);
+ pm_runtime_set_suspended(dev);
pm_suspend_ignore_children(dev, false);
if (driver->supports_autosuspend)
pm_runtime_enable(dev);
static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
{
int status = 0;
- int i = 0;
+ int i = 0, n = 0;
struct usb_interface *intf;
if (udev->state == USB_STATE_NOTATTACHED ||
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
- for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ n = udev->actconfig->desc.bNumInterfaces;
+ for (i = n - 1; i >= 0; --i) {
intf = udev->actconfig->interface[i];
status = usb_suspend_interface(udev, intf, msg);
if (status != 0)
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
- while (--i >= 0) {
+ while (++i < n) {
intf = udev->actconfig->interface[i];
usb_resume_interface(udev, intf, msg, 0);
}
return status;
}
+static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
+{
+ int w, i;
+ struct usb_interface *intf;
+
+ /* Remote wakeup is needed only when we actually go to sleep.
+ * For things like FREEZE and QUIESCE, if the device is already
+ * autosuspended then its current wakeup setting is okay.
+ */
+ if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
+ if (udev->state != USB_STATE_SUSPENDED)
+ udev->do_remote_wakeup = 0;
+ return;
+ }
+
+ /* If remote wakeup is permitted, see whether any interface drivers
+ * actually want it.
+ */
+ w = 0;
+ if (device_may_wakeup(&udev->dev) && udev->actconfig) {
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ intf = udev->actconfig->interface[i];
+ w |= intf->needs_remote_wakeup;
+ }
+ }
+
+ /* If the device is autosuspended with the wrong wakeup setting,
+ * autoresume now so the setting can be changed.
+ */
+ if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup)
+ pm_runtime_resume(&udev->dev);
+ udev->do_remote_wakeup = w;
+}
+
/* The device lock is held by the PM core */
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
do_unbind_rebind(udev, DO_UNBIND);
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ choose_wakeup(udev, msg);
return usb_suspend_both(udev, msg);
}
*/
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->cached_itd_list);
+ INIT_LIST_HEAD(&ehci->cached_sitd_list);
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
return retval;
* this bit; seems too long to spin routinely...
*/
retval = handshake(ehci, status_reg,
- PORT_RESET, 0, 750);
+ PORT_RESET, 0, 1000);
if (retval != 0) {
ehci_err (ehci, "port %d reset error %d\n",
wIndex + 1, retval);
static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
if (ehci->async)
qh_put (ehci->async);
ehci->async = NULL;
}
snprintf(supply, sizeof(supply), "hsusb%d", i);
omap->regulator[i] = regulator_get(omap->dev, supply);
- if (IS_ERR(omap->regulator[i]))
+ if (IS_ERR(omap->regulator[i])) {
+ omap->regulator[i] = NULL;
dev_dbg(&pdev->dev,
"failed to get ehci port%d regulator\n", i);
- else
+ } else {
regulator_enable(omap->regulator[i]);
+ }
}
ret = omap_start_ehc(omap, hcd);
ehci_writel(ehci, cmd, &ehci->regs->command);
/* posted write ... */
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->next_uframe = -1;
return 0;
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
iso_stream_put (ehci, stream);
- /* OK to recycle this SITD now that its completion callback ran. */
+
done:
sitd->urb = NULL;
- sitd->stream = NULL;
- list_move(&sitd->sitd_list, &stream->free_list);
- iso_stream_put(ehci, stream);
-
+ if (ehci->clock_frame != sitd->frame) {
+ /* OK to recycle this SITD now. */
+ sitd->stream = NULL;
+ list_move(&sitd->sitd_list, &stream->free_list);
+ iso_stream_put(ehci, stream);
+ } else {
+ /* HW might remember this SITD, so we can't recycle it yet.
+ * Move it to a safe place until a new frame starts.
+ */
+ list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
+ if (stream->refcount == 2) {
+ /* If iso_stream_put() were called here, stream
+ * would be freed. Instead, just prevent reuse.
+ */
+ stream->ep->hcpriv = NULL;
+ stream->ep = NULL;
+ }
+ }
return retval;
}
/*-------------------------------------------------------------------------*/
-static void free_cached_itd_list(struct ehci_hcd *ehci)
+static void free_cached_lists(struct ehci_hcd *ehci)
{
struct ehci_itd *itd, *n;
+ struct ehci_sitd *sitd, *sn;
list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
struct ehci_iso_stream *stream = itd->stream;
list_move(&itd->itd_list, &stream->free_list);
iso_stream_put(ehci, stream);
}
+
+ list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
+ struct ehci_iso_stream *stream = sitd->stream;
+ sitd->stream = NULL;
+ list_move(&sitd->sitd_list, &stream->free_list);
+ iso_stream_put(ehci, stream);
+ }
}
/*-------------------------------------------------------------------------*/
clock_frame = -1;
}
if (ehci->clock_frame != clock_frame) {
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
clock %= mod;
clock = now;
clock_frame = clock >> 3;
if (ehci->clock_frame != clock_frame) {
- free_cached_itd_list(ehci);
+ free_cached_lists(ehci);
ehci->clock_frame = clock_frame;
}
} else {
int next_uframe; /* scan periodic, start here */
unsigned periodic_sched; /* periodic activity count */
- /* list of itds completed while clock_frame was still active */
+ /* list of itds & sitds completed while clock_frame was still active */
struct list_head cached_itd_list;
+ struct list_head cached_sitd_list;
unsigned clock_frame;
/* per root hub port */
clear_bit (action, &ehci->actions);
}
-static void free_cached_itd_list(struct ehci_hcd *ehci);
+static void free_cached_lists(struct ehci_hcd *ehci);
/*-------------------------------------------------------------------------*/
#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
#endif
-#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
+#define CFGCHIP2 DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
static struct clk *usb11_clk;
static struct clk *usb20_clk;
u16 textlength;
u8 shadow_power; /* for PM */
+ u8 has_interface_pm;
};
/* sysfs_streq can't replace this completely
{
int rc;
- if (!mydev->shadow_power && mydev->powered) {
+ if (mydev->powered && !mydev->has_interface_pm) {
rc = usb_autopm_get_interface(mydev->intf);
if (rc < 0)
return;
+ mydev->has_interface_pm = 1;
}
+ if (mydev->shadow_power != 1)
+ return;
+
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
if (rc < 0)
dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
- if (mydev->shadow_power && !mydev->powered)
+ if (!mydev->powered && mydev->has_interface_pm) {
usb_autopm_put_interface(mydev->intf);
+ mydev->has_interface_pm = 0;
+ }
}
static void update_display_mode(struct usb_sevsegdev *mydev)
mydev->intf = interface;
usb_set_intfdata(interface, mydev);
+ /* PM */
+ mydev->shadow_power = 1; /* currently active */
+ mydev->has_interface_pm = 0; /* have not issued autopm_get */
+
/*set defaults */
mydev->textmode = 0x02; /* ascii mode */
mydev->mode_msb = 0x06; /* 6 characters */
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
+ { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
{ } /* Terminating entry */
};
/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
#define SANWA_VENDOR_ID 0x11ad
#define SANWA_PRODUCT_ID 0x0001
+
+/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
+#define ADLINK_VENDOR_ID 0x0b63
+#define ADLINK_ND6530_PRODUCT_ID 0x6530
#define CMOTECH_PRODUCT_CDU550 0x5553
#define CMOTECH_PRODUCT_CDX650 0x6512
+/* LG devices */
+#define LG_VENDOR_ID 0x1004
+#define LG_PRODUCT_VX4400_6000 0x6000 /* VX4400/VX6000/Rumor */
+
+/* Sanyo devices */
+#define SANYO_VENDOR_ID 0x0474
+#define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */
+
static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
+ { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
/* the array dimension is the number of default entries plus */
/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
/* null entry */
-static struct usb_device_id ti_id_table_3410[10+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
};
-static struct usb_device_id ti_id_table_combined[14+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_CDMA_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_PRODUCT_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_EDGE_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234MU_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBA_PRODUCT_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_MT9234ZBAOLD_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
MODULE_FIRMWARE("mts_cdma.fw");
MODULE_FIRMWARE("mts_gsm.fw");
MODULE_FIRMWARE("mts_edge.fw");
+MODULE_FIRMWARE("mts_mt9234mu.fw");
+MODULE_FIRMWARE("mts_mt9234zba.fw");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
const struct firmware *fw_p;
char buf[32];
+ dbg("%s\n", __func__);
/* try ID specific firmware first, then try generic firmware */
sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
dev->descriptor.idProduct);
case MTS_EDGE_PRODUCT_ID:
strcpy(buf, "mts_edge.fw");
break;
- }
+ case MTS_MT9234MU_PRODUCT_ID:
+ strcpy(buf, "mts_mt9234mu.fw");
+ break;
+ case MTS_MT9234ZBA_PRODUCT_ID:
+ strcpy(buf, "mts_mt9234zba.fw");
+ break;
+ case MTS_MT9234ZBAOLD_PRODUCT_ID:
+ strcpy(buf, "mts_mt9234zba.fw");
+ break; }
}
if (buf[0] == '\0') {
if (tdev->td_is_3410)
return -ENOENT;
}
if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
- dev_err(&dev->dev, "%s - firmware too large\n", __func__);
+ dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
return -ENOENT;
}
status = ti_do_download(dev, pipe, buffer, fw_p->size);
kfree(buffer);
} else {
+ dbg("%s ENOMEM\n", __func__);
status = -ENOMEM;
}
release_firmware(fw_p);
#define MTS_CDMA_PRODUCT_ID 0xF110
#define MTS_GSM_PRODUCT_ID 0xF111
#define MTS_EDGE_PRODUCT_ID 0xF112
+#define MTS_MT9234MU_PRODUCT_ID 0xF114
+#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
+#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
/* Commands */
#define TI_GET_VERSION 0x01
old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
keep_alives = 0;
for (cnt = 0;
- keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max;
+ keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
cnt++) {
unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
M_MBP_2, /* MacBook Pro 2nd gen */
M_MBP_SR, /* MacBook Pro (Santa Rosa) */
M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro 5,1 */
M_UNKNOWN /* placeholder */
};
[M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
[M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
[M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
+ [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
[M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
};
EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
{},
};
num = min(num, ARRAY_SIZE(vb->pfns));
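+	/* __GFP_NOMEMALLOC keeps the balloon out of the emergency reserves;
+	 * __GFP_NOWARN suppresses page-allocation failure warnings (we
+	 * ratelimit our own message below)
+	 */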
for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns++) {
- struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY);
+ struct page *page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN);
if (!page) {
if (printk_ratelimit())
dev_printk(KERN_INFO, &vb->vdev->dev,
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/irq.h>
#include <mach/hardware.h>
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
- int t = ((s16)rom[1] << 8) | rom[0];
- t = t*1000/16;
- return t;
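+ /* bytes 0-1 of the scratchpad hold the raw temperature, little-endian */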
+ s16 t = le16_to_cpup((__le16 *)rom);
+ return t*1000/16;
}
static inline int w1_DS18S20_convert_temp(u8 rom[9])
{
struct afs_super_info *super;
struct vfsmount *mnt;
- struct page *page = NULL;
+ struct page *page;
size_t size;
- char *buf, *devname = NULL, *options = NULL;
+ char *buf, *devname, *options;
int ret;
_enter("{%s}", mntpt->d_name.name);
ret = -EINVAL;
size = mntpt->d_inode->i_size;
if (size > PAGE_SIZE - 1)
- goto error;
+ goto error_no_devname;
ret = -ENOMEM;
devname = (char *) get_zeroed_page(GFP_KERNEL);
if (!devname)
- goto error;
+ goto error_no_devname;
options = (char *) get_zeroed_page(GFP_KERNEL);
if (!options)
- goto error;
+ goto error_no_options;
/* read the contents of the AFS special symlink */
page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
- goto error;
+ goto error_no_page;
}
ret = -EIO;
return mnt;
error:
- if (page)
- page_cache_release(page);
- if (devname)
- free_page((unsigned long) devname);
- if (options)
- free_page((unsigned long) options);
+ page_cache_release(page);
+error_no_page:
+ free_page((unsigned long) options);
+error_no_options:
+ free_page((unsigned long) devname);
+error_no_devname:
_leave(" = %d", ret);
return ERR_PTR(ret);
}
if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
- (int) r,(int)(start_brk-start_code),(int)text_len);
+ (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
goto failed;
}
int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
- struct block_device *bdev = I_BDEV(filp->f_mapping->host);
+ struct inode *bd_inode = filp->f_mapping->host;
+ struct block_device *bdev = I_BDEV(bd_inode);
int error;
- error = sync_blockdev(bdev);
- if (error)
- return error;
-
+ /*
+ * There is no need to serialise calls to blkdev_issue_flush with
+ * i_mutex and doing so causes performance issues with concurrent
+ * O_SYNC writers to a block device.
+ */
+ mutex_unlock(&bd_inode->i_mutex);
+
error = blkdev_issue_flush(bdev, NULL);
if (error == -EOPNOTSUPP)
error = 0;
+
+ mutex_lock(&bd_inode->i_mutex);
+
return error;
}
EXPORT_SYMBOL(blkdev_fsync);
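The comment above describes a general shape: release a lock that only guards unrelated state before a slow device operation, then retake it. A hedged pthread sketch of that shape; every name in it is hypothetical:

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static int issue_flush(void)
{
	usleep(1000);		/* stand-in for a slow cache flush */
	return 0;
}

/* Called with state_lock held, the way ->fsync runs under i_mutex. The
 * lock is dropped around the flush so concurrent writers are not
 * serialised behind the device, then retaken before returning. */
static int fsync_like(void)
{
	int error;

	pthread_mutex_unlock(&state_lock);
	error = issue_flush();
	pthread_mutex_lock(&state_lock);
	return error;
}

int main(void)
{
	pthread_mutex_lock(&state_lock);
	int error = fsync_like();
	pthread_mutex_unlock(&state_lock);
	return error;
}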
physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
} else { /* external block */
physical = EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize;
} else {
struct ext4_iloc iloc;
- err = ext4_get_inode_loc(inode, &iloc);
+ err = __ext4_get_inode_loc(inode, &iloc, 0);
if (err)
return err;
if (wbc->sync_mode == WB_SYNC_ALL)
(unsigned long long)iloc.bh->b_blocknr);
err = -EIO;
}
+ brelse(iloc.bh);
}
return err;
}
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
+ if (test_opt(sb, DISCARD)) {
+ ext4_fsblk_t discard_block;
+
+ discard_block = entry->start_blk +
+ ext4_group_first_block_no(sb, entry->group);
+ trace_ext4_discard_blocks(sb,
+ (unsigned long long)discard_block,
+ entry->count);
+ sb_issue_discard(sb, discard_block, entry->count);
+ }
+
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
page_cache_release(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->group);
- if (test_opt(sb, DISCARD)) {
- ext4_fsblk_t discard_block;
-
- discard_block = entry->start_blk +
- ext4_group_first_block_no(sb, entry->group);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
- }
kmem_cache_free(ext4_free_ext_cachep, entry);
ext4_mb_release_desc(&e4b);
}
#ifdef CONFIG_BLOCK
-#define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits)
-#define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits);
+static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
+{
+ return (offset >> inode->i_blkbits);
+}
+
+static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
+{
+ return (blk << inode->i_blkbits);
+}
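Besides gaining type checking, the inline versions drop the old logical_to_blk macro's stray trailing semicolon and unparenthesised arguments. A small userspace check of the shift arithmetic, assuming 4 KiB blocks (i_blkbits == 12):

#include <stdio.h>

static unsigned long long logical_to_blk(long long offset, unsigned blkbits)
{
	return offset >> blkbits;
}

static long long blk_to_logical(unsigned long long blk, unsigned blkbits)
{
	return blk << blkbits;
}

int main(void)
{
	/* byte offset 8192 falls in block 2; block 2 starts at byte 8192 */
	printf("%llu\n", logical_to_blk(8192, 12));
	printf("%lld\n", blk_to_logical(2, 12));
	return 0;
}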
/**
* __generic_block_fiemap - FIEMAP for block based inodes (no locking)
- * @inode - the inode to map
- * @arg - the pointer to userspace where we copy everything to
- * @get_block - the fs's get_block function
+ * @inode: the inode to map
+ * @fieinfo: the fiemap info struct that will be passed back to userspace
+ * @start: where to start mapping in the inode
+ * @len: how much space to map
+ * @get_block: the fs's get_block function
*
* This does FIEMAP for block based inodes. Basically it will just loop
* through get_block until we hit the number of extents we want to map, or we
*/
int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block)
+ struct fiemap_extent_info *fieinfo, loff_t start,
+ loff_t len, get_block_t *get_block)
{
- struct buffer_head tmp;
- unsigned long long start_blk;
- long long length = 0, map_len = 0;
+ struct buffer_head map_bh;
+ sector_t start_blk, last_blk;
+ loff_t isize = i_size_read(inode);
u64 logical = 0, phys = 0, size = 0;
u32 flags = FIEMAP_EXTENT_MERGED;
- int ret = 0, past_eof = 0, whole_file = 0;
+ bool past_eof = false, whole_file = false;
+ int ret = 0;
- if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
+ ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ if (ret)
return ret;
- start_blk = logical_to_blk(inode, start);
-
- length = (long long)min_t(u64, len, i_size_read(inode));
- if (length < len)
- whole_file = 1;
+ /*
+ * Either the i_mutex or other appropriate locking needs to be held
+ * since we expect isize to not change at all through the duration of
+ * this call.
+ */
+ if (len >= isize) {
+ whole_file = true;
+ len = isize;
+ }
- map_len = length;
+ start_blk = logical_to_blk(inode, start);
+ last_blk = logical_to_blk(inode, start + len - 1);
do {
/*
* we set b_size to the total size we want so it will map as
* many contiguous blocks as possible at once
*/
- memset(&tmp, 0, sizeof(struct buffer_head));
- tmp.b_size = map_len;
+ memset(&map_bh, 0, sizeof(struct buffer_head));
+ map_bh.b_size = len;
- ret = get_block(inode, start_blk, &tmp, 0);
+ ret = get_block(inode, start_blk, &map_bh, 0);
if (ret)
break;
/* HOLE */
- if (!buffer_mapped(&tmp)) {
- length -= blk_to_logical(inode, 1);
+ if (!buffer_mapped(&map_bh)) {
start_blk++;
/*
- * we want to handle the case where there is an
+ * We want to handle the case where there is an
* allocated block at the front of the file, and then
* nothing but holes up to the end of the file properly,
* to make sure that extent at the front gets properly
* marked with FIEMAP_EXTENT_LAST
*/
if (!past_eof &&
- blk_to_logical(inode, start_blk) >=
- blk_to_logical(inode, 0)+i_size_read(inode))
+ blk_to_logical(inode, start_blk) >= isize)
past_eof = true;
/*
- * first hole after going past the EOF, this is our
+ * First hole after going past the EOF; this is our
* last extent
*/
if (past_eof && size) {
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size,
flags);
- break;
+ } else if (size) {
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
+ size = 0;
}
/* if we have holes up to/past EOF then we're done */
- if (length <= 0 || past_eof)
+ if (start_blk > last_blk || past_eof || ret)
break;
} else {
/*
- * we have gone over the length of what we wanted to
+ * We have gone over the length of what we wanted to
* map, and it wasn't the entire file, so add the extent
* we got last time and exit.
*
* are good to go, just add the extent to the fieinfo
* and break
*/
- if (length <= 0 && !whole_file) {
+ if (start_blk > last_blk && !whole_file) {
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size,
flags);
}
logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, tmp.b_blocknr);
- size = tmp.b_size;
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
flags = FIEMAP_EXTENT_MERGED;
- length -= tmp.b_size;
start_blk += logical_to_blk(inode, size);
/*
* soon as we find a hole that the last extent we found
* is marked with FIEMAP_EXTENT_LAST
*/
- if (!past_eof &&
- logical+size >=
- blk_to_logical(inode, 0)+i_size_read(inode))
- past_eof = 1;
+ if (!past_eof && logical + size >= isize)
+ past_eof = true;
}
cond_resched();
} while (1);
- /* if ret is 1 then we just hit the end of the extent array */
+ /* If ret is 1 then we just hit the end of the extent array */
if (ret == 1)
ret = 0;
inode->i_op = &page_symlink_inode_operations;
inode->i_mapping->a_ops = &jfs_aops;
} else {
- inode->i_op = &jfs_symlink_inode_operations;
+ inode->i_op = &jfs_fast_symlink_inode_operations;
/*
* The inline data should be null-terminated, but
* don't let on-disk corruption crash the kernel
bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
- bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth);
+ bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
- dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth);
+ dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
* tree index of this allocation group within the control page.
*/
agperlev =
- (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth;
+ (1 << (L2LPERCTL - (bmp->db_agheight << 1))) / bmp->db_agwidth;
ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
/* dmap control page trees fan-out by 4 and a single allocation
* the subtree to find the leftmost leaf that describes this
* free space.
*/
- for (k = bmp->db_agheigth; k > 0; k--) {
+ for (k = bmp->db_agheight; k > 0; k--) {
for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
if (l2nb <= dcp->stree[m + n]) {
ti = m + n;
}
/*
- * compute db_aglevel, db_agheigth, db_width, db_agstart:
+ * compute db_aglevel, db_agheight, db_agwidth, db_agstart:
* an ag is covered in aglevel dmapctl summary tree,
* at agheight level height (from leaf) with agwidth number of nodes
* each, which starts at agstart index node of the summary tree node
bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
l2nl =
bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
- bmp->db_agheigth = l2nl >> 1;
- bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1));
- for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0;
+ bmp->db_agheight = l2nl >> 1;
+ bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheight << 1));
+ for (i = 5 - bmp->db_agheight, bmp->db_agstart = 0, n = 1; i > 0;
i--) {
bmp->db_agstart += n;
n <<= 2;
__le32 dn_maxag; /* 4: max active alloc group number */
__le32 dn_agpref; /* 4: preferred alloc group (hint) */
__le32 dn_aglevel; /* 4: dmapctl level holding the AG */
- __le32 dn_agheigth; /* 4: height in dmapctl of the AG */
+ __le32 dn_agheight; /* 4: height in dmapctl of the AG */
__le32 dn_agwidth; /* 4: width in dmapctl of the AG */
__le32 dn_agstart; /* 4: start tree index at AG height */
__le32 dn_agl2size; /* 4: l2 num of blks per alloc group */
int dn_maxag; /* max active alloc group number */
int dn_agpref; /* preferred alloc group (hint) */
int dn_aglevel; /* dmapctl level holding the AG */
- int dn_agheigth; /* height in dmapctl of the AG */
+ int dn_agheight; /* height in dmapctl of the AG */
int dn_agwidth; /* width in dmapctl of the AG */
int dn_agstart; /* start tree index at AG height */
int dn_agl2size; /* l2 num of blks per alloc group */
#define db_agsize db_bmap.dn_agsize
#define db_agl2size db_bmap.dn_agl2size
#define db_agwidth db_bmap.dn_agwidth
-#define db_agheigth db_bmap.dn_agheigth
+#define db_agheight db_bmap.dn_agheight
#define db_agstart db_bmap.dn_agstart
#define db_numag db_bmap.dn_numag
#define db_maxlevel db_bmap.dn_maxlevel
extern const struct inode_operations jfs_file_inode_operations;
extern const struct file_operations jfs_file_operations;
extern const struct inode_operations jfs_symlink_inode_operations;
+extern const struct inode_operations jfs_fast_symlink_inode_operations;
extern const struct dentry_operations jfs_ci_dentry_operations;
#endif /* _H_JFS_INODE */
*/
if (ssize <= IDATASIZE) {
- ip->i_op = &jfs_symlink_inode_operations;
+ ip->i_op = &jfs_fast_symlink_inode_operations;
i_fastsymlink = JFS_IP(ip)->i_inline;
memcpy(i_fastsymlink, name, ssize);
else {
jfs_info("jfs_symlink: allocate extent ip:0x%p", ip);
- ip->i_op = &page_symlink_inode_operations;
+ ip->i_op = &jfs_symlink_inode_operations;
ip->i_mapping->a_ops = &jfs_aops;
/*
struct inode *iplist[1];
struct jfs_superblock *j_sb, *j_sb2;
uint old_agsize;
+ int agsizechanged = 0;
struct buffer_head *bh, *bh2;
/* If the volume hasn't grown, get out now */
*/
if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
goto error_out;
+
+ agsizechanged |= (bmp->db_agsize != old_agsize);
+
/*
* the map now has extended to cover additional nblocks:
* dn_mapsize = oldMapsize + nblocks;
* will correctly identify the new ag);
*/
/* if new AG size the same as old AG size, done! */
- if (bmp->db_agsize != old_agsize) {
+ if (agsizechanged) {
if ((rc = diExtendFS(ipimap, ipbmap)))
goto error_out;
return NULL;
}
-const struct inode_operations jfs_symlink_inode_operations = {
+const struct inode_operations jfs_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = jfs_follow_link,
+ .setattr = jfs_setattr,
+ .setxattr = jfs_setxattr,
+ .getxattr = jfs_getxattr,
+ .listxattr = jfs_listxattr,
+ .removexattr = jfs_removexattr,
+};
+
+const struct inode_operations jfs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+ .put_link = page_put_link,
+ .setattr = jfs_setattr,
.setxattr = jfs_setxattr,
.getxattr = jfs_getxattr,
.listxattr = jfs_listxattr,
struct logfs_block *block;
int round, progress, last_progress = 0;
+ /*
+ * Doing too many changes to the segfile at once would result
+ * in a large number of aliases. Write the journal before
+ * things get out of hand.
+ */
+ if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
+ logfs_write_anchor(sb);
+
if (no_free_segments(sb) >= target &&
super->s_no_object_aliases < MAX_OBJ_ALIASES)
return;
static int journal_erase_segment(struct logfs_area *area)
{
struct super_block *sb = area->a_sb;
- struct logfs_segment_header sh;
+ union {
+ struct logfs_segment_header sh;
+ unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)];
+ } u;
u64 ofs;
int err;
if (err)
return err;
- sh.pad = 0;
- sh.type = SEG_JOURNAL;
- sh.level = 0;
- sh.segno = cpu_to_be32(area->a_segno);
- sh.ec = cpu_to_be32(area->a_erase_count);
- sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
- sh.crc = logfs_crc32(&sh, sizeof(sh), 4);
+ memset(&u, 0, sizeof(u));
+ u.sh.pad = 0;
+ u.sh.type = SEG_JOURNAL;
+ u.sh.level = 0;
+ u.sh.segno = cpu_to_be32(area->a_segno);
+ u.sh.ec = cpu_to_be32(area->a_erase_count);
+ u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
+ u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4);
/* This causes a bug in segment.c. Not yet. */
//logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);
ofs = dev_ofs(sb, area->a_segno, 0);
- area->a_used_bytes = ALIGN(sizeof(sh), 16);
- logfs_buf_write(area, ofs, &sh, sizeof(sh));
+ area->a_used_bytes = sizeof(u);
+ logfs_buf_write(area, ofs, &u, sizeof(u));
return 0;
}
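The union pads the on-media header out to the 16-byte granularity that the a_used_bytes accounting assumes, and the memset makes the pad bytes deterministic instead of stack garbage. A self-contained sketch of the ALIGN-in-a-union idiom; the struct layout here is illustrative:

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct header {
	unsigned char type;
	unsigned int segno;
};

int main(void)
{
	/* The char array forces sizeof(u) up to the next 16-byte multiple,
	 * so writing sizeof(u) bytes covers a whole aligned region. */
	union {
		struct header h;
		unsigned char c[ALIGN(sizeof(struct header), 16)];
	} u;

	memset(&u, 0, sizeof(u));
	u.h.type = 1;
	printf("payload %zu, written %zu\n", sizeof(u.h), sizeof(u));
	return 0;
}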
btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
+ btree_grim_visitor32(&tree->segment_map, 0, NULL);
+ tree->no_shadowed_segments = 0;
if (li->li_block) {
/*
if (len == 0)
return logfs_write_header(super, header, 0, type);
+ BUG_ON(len > sb->s_blocksize);
compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
if (compr_len < 0 || type == JE_ANCHOR) {
- BUG_ON(len > sb->s_blocksize);
memcpy(data, buf, len);
compr_len = len;
compr = COMPR_NONE;
if (ofs < 0)
return ofs;
logfs_buf_write(area, ofs, super->s_compressed_je, len);
+ BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
return 0;
}
* struct shadow_tree
* @new: shadows where old_ofs==0, indexed by new_ofs
* @old: shadows where old_ofs!=0, indexed by old_ofs
+ * @segment_map: bitfield of segments containing shadows
+ * @no_shadowed_segments: number of segments containing shadows
*/
struct shadow_tree {
struct btree_head64 new;
struct btree_head64 old;
+ struct btree_head32 segment_map;
+ int no_shadowed_segments;
};
struct object_alias_item {
level_t level, int child_no, __be64 val);
struct logfs_block_ops {
void (*write_block)(struct logfs_block *block);
- gc_level_t (*block_level)(struct logfs_block *block);
void (*free_block)(struct super_block *sb, struct logfs_block*block);
int (*write_alias)(struct super_block *sb,
struct logfs_block *block,
write_alias_t *write_one_alias);
};
+#define MAX_JOURNAL_ENTRIES 256
+
struct logfs_super {
struct mtd_info *s_mtd; /* underlying device */
struct block_device *s_bdev; /* underlying device */
u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
u64 s_last_version;
struct logfs_area *s_journal_area; /* open journal segment */
- __be64 s_je_array[64];
+ __be64 s_je_array[MAX_JOURNAL_ENTRIES];
int s_no_je;
int s_sum_index; /* for the 12 summaries */
return logfs_super(sb)->s_area[(__force u8)gc_level];
}
+static inline void logfs_mempool_destroy(mempool_t *pool)
+{
+ if (pool)
+ mempool_destroy(pool);
+}
+
#endif
}
}
-static gc_level_t inode_block_level(struct logfs_block *block)
-{
- BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER);
- return GC_LEVEL(LOGFS_MAX_LEVELS);
-}
-
-static gc_level_t indirect_block_level(struct logfs_block *block)
-{
- struct page *page;
- struct inode *inode;
- u64 bix;
- level_t level;
-
- page = block->page;
- inode = page->mapping->host;
- logfs_unpack_index(page->index, &bix, &level);
- return expand_level(inode->i_ino, level);
-}
-
/*
* This silences a false, yet annoying gcc warning. I hate it when my editor
* jumps into bitops.h each time I recompile this file.
static struct logfs_block_ops inode_block_ops = {
.write_block = inode_write_block,
- .block_level = inode_block_level,
.free_block = inode_free_block,
.write_alias = inode_write_alias,
};
struct logfs_block_ops indirect_block_ops = {
.write_block = indirect_write_block,
- .block_level = indirect_block_level,
.free_block = indirect_free_block,
.write_alias = indirect_write_alias,
};
mempool_free(shadow, super->s_shadow_pool);
}
+static void mark_segment(struct shadow_tree *tree, u32 segno)
+{
+ int err;
+
+ if (!btree_lookup32(&tree->segment_map, segno)) {
+ err = btree_insert32(&tree->segment_map, segno, (void *)1,
+ GFP_NOFS);
+ BUG_ON(err);
+ tree->no_shadowed_segments++;
+ }
+}
+
/**
* fill_shadow_tree - Propagate shadow tree changes due to a write
* @inode: Inode owning the page
super->s_dirty_used_bytes += shadow->new_len;
super->s_dirty_free_bytes += shadow->old_len;
+ mark_segment(tree, shadow->old_ofs >> super->s_segshift);
+ mark_segment(tree, shadow->new_ofs >> super->s_segshift);
}
}
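Each shadow dirties at most two segments, its old and its new location, and mark_segment() counts a segment only on first sight. A userspace sketch of that dedupe, with a plain array standing in for the btree and made-up segment geometry:

#include <stdbool.h>
#include <stdio.h>

#define SEGSHIFT	17	/* illustrative: 128 KiB segments */
#define NSEGS		1024

static bool seg_map[NSEGS];
static int no_shadowed_segments;

/* Count a segment the first time a shadow touches it, as the real
 * mark_segment() does via btree_insert32(). */
static void mark_segment(unsigned int segno)
{
	if (!seg_map[segno]) {
		seg_map[segno] = true;
		no_shadowed_segments++;
	}
}

int main(void)
{
	/* two shadows whose offsets share one segment: 3 distinct */
	unsigned long long ofs[] = { 0x20000, 0x40000, 0x40000, 0x60000 };
	int i;

	for (i = 0; i < 4; i++)
		mark_segment(ofs[i] >> SEGSHIFT);
	printf("%d shadowed segments\n", no_shadowed_segments);
	return 0;
}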
return logfs_truncate_direct(inode, size);
}
-int logfs_truncate(struct inode *inode, u64 size)
+/*
+ * Truncate, by changing the segment file, can consume a fair amount
+ * of resources. So back off from time to time and do some GC.
+ * 8 or 2048 blocks should be well within safety limits even if
+ * every single block resided in a different segment.
+ */
+#define TRUNCATE_STEP (8 * 1024 * 1024)
+int logfs_truncate(struct inode *inode, u64 target)
{
struct super_block *sb = inode->i_sb;
- int err;
+ u64 size = i_size_read(inode);
+ int err = 0;
- logfs_get_wblocks(sb, NULL, 1);
- err = __logfs_truncate(inode, size);
- if (!err)
- err = __logfs_write_inode(inode, 0);
- logfs_put_wblocks(sb, NULL, 1);
+ size = ALIGN(size, TRUNCATE_STEP);
+ while (size > target) {
+ if (size > TRUNCATE_STEP)
+ size -= TRUNCATE_STEP;
+ else
+ size = 0;
+ if (size < target)
+ size = target;
+
+ logfs_get_wblocks(sb, NULL, 1);
+ err = __logfs_truncate(inode, size);
+ if (!err)
+ err = __logfs_write_inode(inode, 0);
+ logfs_put_wblocks(sb, NULL, 1);
+ }
if (!err)
- err = vmtruncate(inode, size);
+ err = vmtruncate(inode, target);
/* I don't trust error recovery yet. */
WARN_ON(err);
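The loop walks the ALIGNed i_size down toward the target one TRUNCATE_STEP at a time, so the write blocks are held only for a bounded amount of work per iteration. A quick userspace check of the stepping arithmetic; the file sizes are illustrative:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))
#define TRUNCATE_STEP	(8ULL * 1024 * 1024)

int main(void)
{
	/* 20 MiB file truncated to 5 MiB: aligned start is 24 MiB, and the
	 * loop stops at 16 MiB, 8 MiB, then 5 MiB. */
	unsigned long long target = 5ULL * 1024 * 1024;
	unsigned long long size = ALIGN(20ULL * 1024 * 1024, TRUNCATE_STEP);

	while (size > target) {
		size = size > TRUNCATE_STEP ? size - TRUNCATE_STEP : 0;
		if (size < target)
			size = target;
		printf("truncate to %llu\n", size);	/* one GC window each */
	}
	return 0;
}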
struct logfs_super *super = logfs_super(sb);
destroy_meta_inode(super->s_segfile_inode);
- if (super->s_block_pool)
- mempool_destroy(super->s_block_pool);
- if (super->s_shadow_pool)
- mempool_destroy(super->s_shadow_pool);
+ logfs_mempool_destroy(super->s_block_pool);
+ logfs_mempool_destroy(super->s_shadow_pool);
}
return 0;
}
-static gc_level_t btree_block_level(struct logfs_block *block)
-{
- return expand_level(block->ino, block->level);
-}
-
static struct logfs_block_ops btree_block_ops = {
.write_block = btree_write_block,
- .block_level = btree_block_level,
.free_block = __free_block,
.write_alias = btree_write_alias,
};
for (i--; i >= 0; i--)
free_area(super->s_area[i]);
free_area(super->s_journal_area);
- mempool_destroy(super->s_alias_pool);
+ logfs_mempool_destroy(super->s_alias_pool);
return -ENOMEM;
}
#include "logfs.h"
#include <linux/bio.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
#include <linux/mtd/mtd.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
sb->s_fs_info = super;
sb->s_mtd = super->s_mtd;
sb->s_bdev = super->s_bdev;
+ if (sb->s_bdev)
+ sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+ if (sb->s_mtd)
+ sb->s_bdi = sb->s_mtd->backing_dev_info;
return 0;
}
btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
+ btree_init_mempool32(&super->s_shadow_tree.segment_map,
+ super->s_btree_pool);
ret = logfs_init_mapping(sb);
if (ret)
if (super->s_erase_page)
__free_page(super->s_erase_page);
super->s_devops->put_device(sb);
- mempool_destroy(super->s_btree_pool);
- mempool_destroy(super->s_alias_pool);
+ logfs_mempool_destroy(super->s_btree_pool);
+ logfs_mempool_destroy(super->s_alias_pool);
kfree(super);
log_super("LogFS: Finished unmounting\n");
}
struct reiserfs_de_head *deh)
{
struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
- if (reiserfs_expose_privroot(dir->d_sb))
- return 0;
return (dir == dir->d_parent && privroot->d_inode &&
deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
}
if (!err && new_size < i_size_read(dentry->d_inode)) {
struct iattr newattrs = {
.ia_ctime = current_fs_time(inode->i_sb),
- .ia_size = buffer_size,
+ .ia_size = new_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
return generic_permission(inode, mask, NULL);
}
-/* This will catch lookups from the fs root to .reiserfs_priv */
-static int
-xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
+static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
- if (container_of(q1, struct dentry, d_name) == priv_root)
- return -ENOENT;
- if (q1->len == name->len &&
- !memcmp(q1->name, name->name, name->len))
- return 0;
- return 1;
+ return -EPERM;
}
static const struct dentry_operations xattr_lookup_poison_ops = {
- .d_compare = xattr_lookup_poison,
+ .d_revalidate = xattr_hide_revalidate,
};
int reiserfs_lookup_privroot(struct super_block *s)
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
- if (!reiserfs_expose_privroot(s))
- s->s_root->d_op = &xattr_lookup_poison_ops;
+ dentry->d_op = &xattr_lookup_poison_ops;
if (dentry->d_inode)
dentry->d_inode->i_flags |= S_PRIVATE;
} else
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
+ struct fiemap_extent_info *fieinfo,
+ loff_t start, loff_t len,
+ get_block_t *get_block);
extern int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start,
u64 len, get_block_t *get_block);
*/
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
int user_alloc;
};
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
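The new helper centralises the sizing rule the call sites used to open-code: round npages up to a whole long's worth of bits, then convert bits to bytes. A hedged userspace check of the arithmetic on a 64-bit host:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

static unsigned long dirty_bitmap_bytes(unsigned long npages)
{
	return ALIGN(npages, BITS_PER_LONG) / 8;
}

int main(void)
{
	/* 100 pages -> 128 bits -> 16 bytes; 64 pages -> exactly 8 bytes */
	printf("%lu\n", dirty_bitmap_bytes(100));
	printf("%lu\n", dirty_bitmap_bytes(64));
	return 0;
}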
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */
+/********** mm/hugetlb.c **********/
+/*
+ * Private mappings of hugetlb pages use this poisoned value for
+ * page->mapping. The core VM should not be doing anything with this mapping
+ * but futex requires the existence of some page->mapping value even though it
+ * is unused if PAGE_MAPPING_ANON is set.
+ */
+#define HUGETLB_POISON ((void *)(0x00300300 + POISON_POINTER_DELTA + PAGE_MAPPING_ANON))
+
/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc
{
/* Nothing except the stubbed out regulator API should be
* looking at the value except to check if it is an error
- * value so the actual return value doesn't matter.
+ * value. Drivers are free to handle NULL specifically by
+ * skipping all regulator API calls, but they don't have to.
+ * Drivers which don't should make sure they properly handle
+ * corner cases of the API, such as regulator_get_voltage()
+ * returning 0.
*/
- return (struct regulator *)id;
+ return NULL;
}
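With the stub returning NULL, a consumer built without CONFIG_REGULATOR gets a "regulator" that every stubbed call accepts. A kernel-style fragment sketching the consumer pattern the comment asks for; configure_for_voltage() and the "vcc" supply name are made up:

	/* Works with both a real regulator and the !CONFIG_REGULATOR stub,
	 * where regulator_get() returns NULL and regulator_get_voltage()
	 * returns 0. */
	reg = regulator_get(dev, "vcc");
	if (!IS_ERR(reg)) {
		int uV = regulator_get_voltage(reg);

		if (uV > 0)	/* 0 here means the API is stubbed out */
			configure_for_voltage(uV);
		regulator_put(reg);
	}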
static inline void regulator_put(struct regulator *regulator)
{
compress_name);
message = msg_buf;
}
- }
+ } else
+ error("junk in compressed archive");
if (state != Reset)
error("junk in compressed archive");
this_header = saved_offset + my_inptr;
error:
put_cred(new);
+ return NULL;
+
free_tgcred:
#ifdef CONFIG_KEYS
kfree(tgcred);
{
if (cred->magic != CRED_MAGIC)
return true;
- if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
- return true;
#ifdef CONFIG_SECURITY_SELINUX
if (selinux_is_enabled()) {
if ((unsigned long) cred->security < PAGE_SIZE)
#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
- (current->personality == PER_LINUX32 && \
+ (personality(current->personality) == PER_LINUX32 && \
copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
sizeof(COMPAT_UTS_MACHINE)))
#else
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;
- int obytes_processed = 0;
+ int ret = -1;
set_error_fn(error_fn);
/* decompress */
tmp = dst_len;
- r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
+
+ /* When the input data is not compressed at all,
+ * lzo1x_decompress_safe will fail, so call memcpy()
+ * instead */
+ if (unlikely(dst_len == src_len))
+ memcpy(out_buf, in_buf, src_len);
+ else {
+ r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
out_buf, &tmp);
- if (r != LZO_E_OK || dst_len != tmp) {
- error("Compressed data violation");
- goto exit_2;
+ if (r != LZO_E_OK || dst_len != tmp) {
+ error("Compressed data violation");
+ goto exit_2;
+ }
}
- obytes_processed += dst_len;
if (flush)
flush(out_buf, dst_len);
if (output)
in_buf += src_len;
}
+ ret = 0;
exit_2:
if (!input)
free(in_buf);
if (!output)
free(out_buf);
exit:
- return obytes_processed;
+ return ret;
}
#define decompress unlzo
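The dst_len == src_len test is a stored-block convention: a block that did not shrink under LZO is written uncompressed, so inflating it is a plain copy. A self-contained sketch of the dispatch, with the real decompressor abstracted behind a callback; every name is hypothetical:

#include <string.h>

/* Returns 0 on success, -1 on corrupt data. The callback stands in for
 * lzo1x_decompress_safe(); only the stored-block path is concrete here. */
static int inflate_block(const unsigned char *src, size_t src_len,
			 unsigned char *dst, size_t dst_len,
			 int (*decompress)(const unsigned char *, size_t,
					   unsigned char *, size_t *))
{
	size_t out = dst_len;

	if (src_len == dst_len) {		/* stored uncompressed */
		memcpy(dst, src, src_len);
		return 0;
	}
	if (decompress(src, src_len, dst, &out) || out != dst_len)
		return -1;			/* compressed data violation */
	return 0;
}

static int never_called(const unsigned char *s, size_t sl,
			unsigned char *d, size_t *dl)
{
	(void)s; (void)sl; (void)d; (void)dl;
	return -1;
}

int main(void)
{
	unsigned char in[] = { 1, 2, 3, 4 }, out[4];

	return inflate_block(in, 4, out, 4, never_called);	/* copies */
}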
ret->element_size = element_size;
ret->total_nr_elements = total;
if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
- memset(ret->parts[0], FLEX_ARRAY_FREE,
+ memset(&ret->parts[0], FLEX_ARRAY_FREE,
FLEX_ARRAY_BASE_BYTES_LEFT);
return ret;
}
return simple_strtoull(cp, endp, base);
}
+EXPORT_SYMBOL(simple_strtoll);
/**
* strict_strtoul - convert a string to an unsigned long strictly
mapping = (struct address_space *) page_private(page);
set_page_private(page, 0);
+ page->mapping = NULL;
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
- } else
+ } else {
lock_page(page);
+ page->mapping = HUGETLB_POISON;
+ }
}
/*
do {
cond_resched();
page = follow_page(vma, addr, FOLL_GET);
- if (!page)
+ if (IS_ERR_OR_NULL(page))
break;
if (PageKsm(page))
ret = handle_mm_fault(vma->vm_mm, vma, addr,
goto out;
page = follow_page(vma, addr, FOLL_GET);
- if (!page)
+ if (IS_ERR_OR_NULL(page))
goto out;
if (PageAnon(page)) {
flush_anon_page(vma, page, addr);
cond_resched();
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
tree_page = get_mergeable_page(tree_rmap_item);
- if (!tree_page)
+ if (IS_ERR_OR_NULL(tree_page))
return NULL;
/*
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (*page && PageAnon(*page)) {
+ if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
up_read(&mm->mmap_sem);
return rmap_item;
}
- if (*page)
+ if (!IS_ERR_OR_NULL(*page))
put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
static void ksm_do_scan(unsigned int scan_npages)
{
struct rmap_item *rmap_item;
- struct page *page;
+ struct page *uninitialized_var(page);
while (scan_npages--) {
cond_resched();
}
unlock_page_cgroup(pc);
+ *ptr = mem;
if (mem) {
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
css_put(&mem->css);
}
- *ptr = mem;
return ret;
}
goto out_enomem_free_avc;
allocated = anon_vma;
}
- spin_lock(&anon_vma->lock);
+ spin_lock(&anon_vma->lock);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
list_add(&avc->same_vma, &vma->anon_vma_chain);
list_add(&avc->same_anon_vma, &anon_vma->head);
allocated = NULL;
+ avc = NULL;
}
spin_unlock(&mm->page_table_lock);
-
spin_unlock(&anon_vma->lock);
- if (unlikely(allocated)) {
+
+ if (unlikely(allocated))
anon_vma_free(allocated);
+ if (unlikely(avc))
anon_vma_chain_free(avc);
- }
}
return 0;
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry)) {
+ if (!IS_ERR(*dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
error = mkdir(parent->d_inode, *dentry, mode);
else
error = create(parent->d_inode, *dentry, mode);
} else
- error = PTR_ERR(dentry);
+ error = PTR_ERR(*dentry);
mutex_unlock(&parent->d_inode->i_mutex);
return error;
{
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = cons->key, *authkey = cons->authkey, *keyring;
+ struct key *key = cons->key, *authkey = cons->authkey, *keyring,
+ *session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
if (cred->tgcred->process_keyring)
prkey = cred->tgcred->process_keyring->serial;
- if (cred->tgcred->session_keyring)
- sskey = rcu_dereference(cred->tgcred->session_keyring)->serial;
- else
- sskey = cred->user->session_keyring->serial;
+ rcu_read_lock();
+ session = rcu_dereference(cred->tgcred->session_keyring);
+ if (!session)
+ session = cred->user->session_keyring;
+ sskey = session->serial;
+ rcu_read_unlock();
sprintf(keyring_str[2], "%d", sskey);
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
{}
};
ad198x_power_eapd(codec);
return 0;
}
-
-static int ad198x_resume(struct hda_codec *codec)
-{
- ad198x_init(codec);
- snd_hda_codec_resume_amp(codec);
- snd_hda_codec_resume_cache(codec);
- return 0;
-}
#endif
static struct hda_codec_ops ad198x_patch_ops = {
#endif
#ifdef SND_HDA_NEEDS_RESUME
.suspend = ad198x_suspend,
- .resume = ad198x_resume,
#endif
.reboot_notify = ad198x_shutup,
};
SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
- SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
+ SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
"Dell Studio 1555", STAC_DELL_M6_DMIC),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
"Dell Studio 1557", STAC_DELL_M6_DMIC),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+ "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+ "Dell Studio 1558", STAC_DELL_M6_BOTH),
{} /* terminator */
};
struct snd_kcontrol *master_switch;
struct snd_kcontrol *master_volume;
struct tasklet_struct hwvol_tq;
+ unsigned int in_suspend;
#ifdef CONFIG_PM
u16 *suspend_mem;
MODULE_DEVICE_TABLE(pci, snd_m3_ids);
static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
+ /* Ignore spurious HV interrupts during suspend / resume; this avoids
+ mistaking them for a mute button press. */
+ if (chip->in_suspend)
+ return;
+
if (!chip->master_switch || !chip->master_volume)
return;
if (chip->suspend_mem == NULL)
return 0;
+ chip->in_suspend = 1;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(chip->pcm);
snd_ac97_suspend(chip->ac97);
snd_m3_hv_init(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ chip->in_suspend = 0;
return 0;
}
#endif /* CONFIG_PM */
struct mm_struct *mm)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
kvm_arch_flush_shadow(kvm);
+ srcu_read_unlock(&kvm->srcu, idx);
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
- unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+ unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
new.dirty_bitmap = vmalloc(dirty_bytes);
if (!new.dirty_bitmap)
{
struct kvm_memory_slot *memslot;
int r, i;
- int n;
+ unsigned long n;
unsigned long any = 0;
r = -EINVAL;
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ n = kvm_dirty_bitmap_bytes(memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
memslot = gfn_to_memslot_unaliased(kvm, gfn);
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
+ unsigned long *p = memslot->dirty_bitmap +
+ rel_gfn / BITS_PER_LONG;
+ int offset = rel_gfn % BITS_PER_LONG;
/* avoid RMW */
- if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
- generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+ if (!generic_test_le_bit(offset, p))
+ generic___set_le_bit(offset, p);
}
}
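Splitting rel_gfn into a word pointer plus an in-word offset keeps the index handed to the generic little-endian bit helpers below BITS_PER_LONG instead of passing an arbitrarily large bit number. The div/mod split is easy to sanity-check in userspace:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))

int main(void)
{
	/* rel_gfn 130 on a 64-bit host: word 2, bit 2 within that word */
	unsigned long rel_gfn = 130;
	unsigned long word = rel_gfn / BITS_PER_LONG;
	unsigned int offset = rel_gfn % BITS_PER_LONG;

	printf("word %lu, bit %u\n", word, offset);
	return 0;
}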