* reg_10
bit 7 6 5 4 3 2 1 0
- 0 0 0 0 0 0 0 A
+ 0 0 0 0 R F T A
A: 1 = enable absolute tracking
+ T: 1 = enable two finger mode auto correct
+ F: 1 = disable ABS Position Filter
+ R: 1 = enable real hardware resolution
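For illustration only (the macro names below are hypothetical, not from
the driver), these bits combine into a register value; for example 0x0b,
written to reg_10 later in this series for hw_version 3 devices, sets
A, T and R:

	#define REG10_ABS	0x01	/* A: absolute tracking */
	#define REG10_TF_AC	0x02	/* T: two finger mode auto correct */
	#define REG10_NO_FILT	0x04	/* F: disable ABS position filter */
	#define REG10_HW_RES	0x08	/* R: real hardware resolution */

	unsigned char reg_10 = REG10_ABS | REG10_TF_AC | REG10_HW_RES; /* 0x0b */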
6.2 Native absolute mode 6 byte packet format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If no 2.6.x.y kernel exists, the 2.6.x kernel with the highest number
is the current stable kernel.
-2.6.x.y is maintained by the "stable" team <stable@kernel.org> and is
+2.6.x.y is maintained by the "stable" team <stable@vger.kernel.org> and is
released as needed. The usual release cycle is about every two weeks,
but it can be longer if there are no pressing problems. Security-related
problems, however, usually see a release almost immediately.
Procedure for submitting patches to the -stable tree:
- - After verifying that it follows the rules above, send the patch to stable@kernel.org.
+ - After verifying that it follows the rules above, send the patch to stable@vger.kernel.org.
 - The sender receives an ACK when the patch is accepted into the queue,
   or a NAK if it is rejected. This response may take a few days,
   depending on the developers' schedules.
 - If accepted, the patch is added to the -stable queue for review by
   other developers and by the relevant subsystem maintainer.
- - When the stable@kernel.org address is added to a patch, that patch is
+ - When the stable@vger.kernel.org address is added to a patch, that patch is
   automatically emailed to the stable team when it lands in Linus' tree.
- - Security patches should not be sent to this alias (stable@kernel.org);
+ - Security patches should not be sent to this alias (stable@vger.kernel.org);
   they go to the security@kernel.org address instead.
Review cycle:
If no 2.6.x.y kernel exists, the latest 2.6.x kernel is the current
stable kernel.
-2.6.x.y kernels are maintained by the "stable" team (email <stable@kernel.org>)
+2.6.x.y kernels are maintained by the "stable" team (email <stable@vger.kernel.org>)
and are generally released every other week.
The Documentation/stable_kernel_rules.txt file in the kernel source tree
describes the kinds of patches the stable tree will accept.
The process for submitting patches to the stable tree:
- - After confirming that the patch follows the rules above, send it to stable@kernel.org.
+ - After confirming that the patch follows the rules above, send it to stable@vger.kernel.org.
 - If the patch is accepted into the queue, the sender receives an ACK
   reply; otherwise a NAK. The reply may take a few days, depending on
   the developers' schedules.
 - Accepted patches are added to the stable queue to await review by
   other developers.
VERSION = 3
PATCHLEVEL = 2
-SUBLEVEL = 58
+SUBLEVEL = 61
EXTRAVERSION =
NAME = Saber-toothed Squirrel
/* Select the best insn combination to perform the */ \
/* actual __m * __n / (__p << 64) operation. */ \
if (!__c) { \
- asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
+ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n) \
#define put_user(x,p) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ const typeof(*(p)) __user *__tmp_p = (p); \
register const typeof(*(p)) __r2 asm("r2") = (x); \
- register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
if (!csize)
return 0;
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
pdev = platform_device_alloc("mx3-camera", 0);
if (!pdev)
- goto err;
+ return ERR_PTR(-ENOMEM);
pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
if (!pdev->dev.dma_mask)
#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
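+/* work to do on syscall entry */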
+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
+
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
- for (; nirq >= 0; nirq--, imp++) {
+ for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
stack_done:
lw t0, TI_FLAGS($28) # syscall tracing enabled?
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
bnez t0, syscall_trace_entry # -> yes
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
PTR 4b, bad_stack
.previous
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
- ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
-KBUILD_CPPFLAGS += -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
KBUILD_AFLAGS += -Iarch/$(ARCH)
KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
CPP = $(CC) -E $(KBUILD_CFLAGS)
* ld rY,ADDROFF(name)(rX)
*/
#ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
#define LOAD_REG_IMMEDIATE(reg,expr) \
lis (reg),(expr)@highest; \
ori (reg),(reg),(expr)@higher; \
rldicr (reg),(reg),32,31; \
- oris (reg),(reg),(expr)@h; \
+ oris reg,reg,(expr)@__AS_ATHIGH; \
ori (reg),(reg),(expr)@l;
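/*
 * Illustrative sketch (not from the kernel source): the 16-bit slices
 * that the @highest/@higher/@__AS_ATHIGH/@l operators select from a
 * 64-bit immediate, written out in C:
 *
 *	unsigned long long expr = 0x1122334455667788ULL;
 *	unsigned int highest = (expr >> 48) & 0xffff;	// lis  reg,expr@highest
 *	unsigned int higher  = (expr >> 32) & 0xffff;	// ori  reg,reg,expr@higher
 *	unsigned int high    = (expr >> 16) & 0xffff;	// oris reg,reg,expr@high
 *	unsigned int low     =  expr        & 0xffff;	// ori  reg,reg,expr@l
 *
 * lis+ori build the upper 32 bits, rldicr shifts them left by 32, and
 * oris+ori then fill in the lower 32 bits.
 */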
#define LOAD_REG_ADDR(reg,name) \
static unsigned int legacy_serial_count;
static int legacy_serial_console = -1;
+static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
+ UPF_SHARE_IRQ | UPF_FIXED_PORT;
+
static unsigned int tsi_serial_in(struct uart_port *p, int offset)
{
unsigned int tmp;
{
u64 addr;
const u32 *addrp;
- upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
- | UPF_FIXED_PORT;
struct device_node *tsi = of_get_parent(np);
/* We only support ports that have a clock frequency properly
* IO port value. It will be fixed up later along with the irq
*/
if (tsi && !strcmp(tsi->type, "tsi-bridge"))
- return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
+ return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
+ NO_IRQ, legacy_port_flags, 0);
else
- return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
+ return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
+ NO_IRQ, legacy_port_flags, 0);
}
static int __init add_legacy_isa_port(struct device_node *np,
/* Add port, irq will be dealt with later */
return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr,
- NO_IRQ, UPF_BOOT_AUTOCONF, 0);
+ NO_IRQ, legacy_port_flags, 0);
}
* IO port value. It will be fixed up later along with the irq
*/
return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
- UPF_BOOT_AUTOCONF, np != pci_dev);
+ legacy_port_flags, np != pci_dev);
}
#endif
for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, intserv[j]);
- set_cpu_present(cpu, true);
+ set_cpu_present(cpu, of_device_is_available(dn));
set_hard_smp_processor_id(cpu, intserv[j]);
set_cpu_possible(cpu, true);
cpu++;
mr 1,11
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+_GLOBAL(_savevr_20)
+ li r11,-192
+ stvx vr20,r11,r0
+_GLOBAL(_savevr_21)
+ li r11,-176
+ stvx vr21,r11,r0
+_GLOBAL(_savevr_22)
+ li r11,-160
+ stvx vr22,r11,r0
+_GLOBAL(_savevr_23)
+ li r11,-144
+ stvx vr23,r11,r0
+_GLOBAL(_savevr_24)
+ li r11,-128
+ stvx vr24,r11,r0
+_GLOBAL(_savevr_25)
+ li r11,-112
+ stvx vr25,r11,r0
+_GLOBAL(_savevr_26)
+ li r11,-96
+ stvx vr26,r11,r0
+_GLOBAL(_savevr_27)
+ li r11,-80
+ stvx vr27,r11,r0
+_GLOBAL(_savevr_28)
+ li r11,-64
+ stvx vr28,r11,r0
+_GLOBAL(_savevr_29)
+ li r11,-48
+ stvx vr29,r11,r0
+_GLOBAL(_savevr_30)
+ li r11,-32
+ stvx vr30,r11,r0
+_GLOBAL(_savevr_31)
+ li r11,-16
+ stvx vr31,r11,r0
+ blr
+
+_GLOBAL(_restvr_20)
+ li r11,-192
+ lvx vr20,r11,r0
+_GLOBAL(_restvr_21)
+ li r11,-176
+ lvx vr21,r11,r0
+_GLOBAL(_restvr_22)
+ li r11,-160
+ lvx vr22,r11,r0
+_GLOBAL(_restvr_23)
+ li r11,-144
+ lvx vr23,r11,r0
+_GLOBAL(_restvr_24)
+ li r11,-128
+ lvx vr24,r11,r0
+_GLOBAL(_restvr_25)
+ li r11,-112
+ lvx vr25,r11,r0
+_GLOBAL(_restvr_26)
+ li r11,-96
+ lvx vr26,r11,r0
+_GLOBAL(_restvr_27)
+ li r11,-80
+ lvx vr27,r11,r0
+_GLOBAL(_restvr_28)
+ li r11,-64
+ lvx vr28,r11,r0
+_GLOBAL(_restvr_29)
+ li r11,-48
+ lvx vr29,r11,r0
+_GLOBAL(_restvr_30)
+ li r11,-32
+ lvx vr30,r11,r0
+_GLOBAL(_restvr_31)
+ li r11,-16
+ lvx vr31,r11,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#else /* CONFIG_PPC64 */
.globl _savegpr0_14
mtlr r0
blr
+#ifdef CONFIG_ALTIVEC
+/* Called with r0 pointing just beyond the end of the vector save area. */
+
+.globl _savevr_20
+_savevr_20:
+ li r12,-192
+ stvx vr20,r12,r0
+.globl _savevr_21
+_savevr_21:
+ li r12,-176
+ stvx vr21,r12,r0
+.globl _savevr_22
+_savevr_22:
+ li r12,-160
+ stvx vr22,r12,r0
+.globl _savevr_23
+_savevr_23:
+ li r12,-144
+ stvx vr23,r12,r0
+.globl _savevr_24
+_savevr_24:
+ li r12,-128
+ stvx vr24,r12,r0
+.globl _savevr_25
+_savevr_25:
+ li r12,-112
+ stvx vr25,r12,r0
+.globl _savevr_26
+_savevr_26:
+ li r12,-96
+ stvx vr26,r12,r0
+.globl _savevr_27
+_savevr_27:
+ li r12,-80
+ stvx vr27,r12,r0
+.globl _savevr_28
+_savevr_28:
+ li r12,-64
+ stvx vr28,r12,r0
+.globl _savevr_29
+_savevr_29:
+ li r12,-48
+ stvx vr29,r12,r0
+.globl _savevr_30
+_savevr_30:
+ li r12,-32
+ stvx vr30,r12,r0
+.globl _savevr_31
+_savevr_31:
+ li r12,-16
+ stvx vr31,r12,r0
+ blr
+
+.globl _restvr_20
+_restvr_20:
+ li r12,-192
+ lvx vr20,r12,r0
+.globl _restvr_21
+_restvr_21:
+ li r12,-176
+ lvx vr21,r12,r0
+.globl _restvr_22
+_restvr_22:
+ li r12,-160
+ lvx vr22,r12,r0
+.globl _restvr_23
+_restvr_23:
+ li r12,-144
+ lvx vr23,r12,r0
+.globl _restvr_24
+_restvr_24:
+ li r12,-128
+ lvx vr24,r12,r0
+.globl _restvr_25
+_restvr_25:
+ li r12,-112
+ lvx vr25,r12,r0
+.globl _restvr_26
+_restvr_26:
+ li r12,-96
+ lvx vr26,r12,r0
+.globl _restvr_27
+_restvr_27:
+ li r12,-80
+ lvx vr27,r12,r0
+.globl _restvr_28
+_restvr_28:
+ li r12,-64
+ lvx vr28,r12,r0
+.globl _restvr_29
+_restvr_29:
+ li r12,-48
+ lvx vr29,r12,r0
+.globl _restvr_30
+_restvr_30:
+ li r12,-32
+ lvx vr30,r12,r0
+.globl _restvr_31
+_restvr_31:
+ li r12,-16
+ lvx vr31,r12,r0
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
#endif /* CONFIG_PPC64 */
#endif
__u8 pad_0x02e8[0x0300-0x02e8]; /* 0x02e8 */
/* Interrupt response block */
- __u8 irb[64]; /* 0x0300 */
+ __u8 irb[96]; /* 0x0300 */
- __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
+ __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
/*
* 0xe00 contains the address of the IPL Parameter Information
__u64 cmf_hpp; /* 0x0378 */
/* Interrupt response block. */
- __u8 irb[64]; /* 0x0380 */
+ __u8 irb[96]; /* 0x0380 */
+ __u8 pad_0x03e0[0x0400-0x03e0]; /* 0x03e0 */
/* Per cpu primary space access list */
- __u32 paste[16]; /* 0x03c0 */
+ __u32 paste[16]; /* 0x0400 */
- __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */
+ __u8 pad_0x0440[0x0e00-0x0440]; /* 0x0440 */
/*
* 0xe00 contains the address of the IPL Parameter Information
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+ ptep_clear_flush(vma, addr, ptep);
}
static inline int huge_pte_none(pte_t pte)
#define ARCH_HAS_USER_SINGLE_STEP_INFO
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set. The
+ * ptracer can change arbitrary register values, which is usually fine
+ * because the usual ptrace stops run off the signal delivery path,
+ * which forces IRET; however, ptrace_event() stops happen in arbitrary
+ * places in the kernel and do not force the IRET path.
+ *
+ * So force the IRET path after a ptrace stop: the macro below always
+ * returns false (no extra arch work is needed), and setting
+ * TIF_NOTIFY_RESUME as a side effect is what forces the slow IRET
+ * return.
+ */
+#define arch_ptrace_stop_needed(code, info) \
+({ \
+ set_thread_flag(TIF_NOTIFY_RESUME); \
+ false; \
+})
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
jnz sysenter_audit
sysenter_do_call:
cmpl $(nr_syscalls), %eax
- jae syscall_badsys
+ jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
+sysenter_after_call:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
- jmp resume_userspace
+ jmp syscall_exit
+END(syscall_badsys)
+
+sysenter_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC
/*
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
+int sysctl_ldt16 = 0;
+
#ifdef CONFIG_SMP
static void flush_ldt(void *current_mm)
{
* IRET leaking the high bits of the kernel stack address.
*/
#ifdef CONFIG_X86_64
- if (!ldt_info.seg_32bit) {
+ if (!ldt_info.seg_32bit && !sysctl_ldt16) {
error = -EINVAL;
goto out_unlock;
}
#ifdef CONFIG_X86_64
#define vdso_enabled sysctl_vsyscall32
#define arch_setup_additional_pages syscall32_setup_pages
+extern int sysctl_ldt16;
#endif
/*
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "ldt16",
+ .data = &sysctl_ldt16,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{}
};
#ifdef CONFIG_X86
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+static inline int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ return 0;
+}
+#else
static int set_copy_dsdt(const struct dmi_system_id *id)
{
printk(KERN_NOTICE "%s detected - "
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}
+#endif
static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+ * when trying to clear the EC */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
Transaction Management
spin_unlock_irqrestore(&ec->curr_lock, flags);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec);
+ return acpi_ec_sync_query(ec, NULL);
}
return 0;
}
EXPORT_SYMBOL(ec_transaction);
+/*
+ * Process _Q events that might have accumulated in the EC.
+ * Called with the ec mutex held.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+ int i, status;
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+ status = acpi_ec_sync_query(ec, &value);
+ if (status || !value)
+ break;
+ }
+
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+ else
+ pr_info("%d stale EC events cleared\n", i);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
mutex_lock(&ec->lock);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+ if (EC_FLAGS_CLEAR_ON_RESUME)
+ acpi_ec_clear(ec);
+
mutex_unlock(&ec->lock);
}
kfree(handler);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int status;
struct acpi_ec_query_handler *handler, *copy;
- if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (data)
+ *data = value;
+ if (status)
return status;
+
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
if (!ec)
return;
mutex_lock(&ec->lock);
- acpi_ec_sync_query(ec);
+ acpi_ec_sync_query(ec, NULL);
mutex_unlock(&ec->lock);
}
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+ /* Clear stale _Q events if hardware might require that */
+ if (EC_FLAGS_CLEAR_ON_RESUME) {
+ mutex_lock(&ec->lock);
+ acpi_ec_clear(ec);
+ mutex_unlock(&ec->lock);
+ }
return ret;
}
return 0;
}
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. If too many events accumulate, these ECs stop reporting GPEs
+ * until they are manually polled (e.g. Samsung Series 5/9 notebooks).
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
+ * other Samsung models are very likely affected as well.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing EC poll on resume.\n");
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
+ return 0;
+}
+
static struct dmi_system_id __initdata ec_dmi_table[] = {
{
ec_skip_dsdt_scan, "Compal JFL92", {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
+ .driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+ .driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
- unsigned int i;
+ unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- /* the last tag is reserved for internal command. */
- for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
- if (!test_and_set_bit(i, &ap->qc_allocated)) {
- qc = __ata_qc_from_tag(ap, i);
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+
+ /* the last tag is reserved for internal command. */
+ if (tag == ATA_TAG_INTERNAL)
+ continue;
+
+ if (!test_and_set_bit(tag, &ap->qc_allocated)) {
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = tag;
+ ap->last_tag = tag;
break;
}
-
- if (qc)
- qc->tag = i;
+ }
return qc;
}
}
// cast needed as there is no %? for pointer differences
PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
- skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
+ skb, skb->head, (long) skb_end_offset(skb));
rx.handle = virt_to_bus (skb);
rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
if (rx_give (dev, &rx, pool))
tail = readl(SAR_REG_RAWCT);
pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
- skb_end_pointer(queue) - queue->head - 16,
+ skb_end_offset(queue) - 16,
PCI_DMA_FROMDEVICE);
while (head != tail) {
int ret;
while (ptr) {
- ret = copy_to_user(param, ptr, sizeof(*ptr));
+ struct floppy_raw_cmd cmd = *ptr;
+ cmd.next = NULL;
+ cmd.kernel_data = NULL;
+ ret = copy_to_user(param, &cmd, sizeof(cmd));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
- if (ret)
- return -EFAULT;
ptr->next = NULL;
ptr->buffer_length = 0;
+ ptr->kernel_data = NULL;
+ if (ret)
+ return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
- ptr->kernel_data = NULL;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
+ { USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
- struct tty_struct *tty = hu->tty;
- struct hci_dev *hdev = hu->hdev;
- struct sk_buff *skb;
-
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
return 0;
BT_DBG("");
+ schedule_work(&hu->write_work);
+
+ return 0;
+}
+
+static void hci_uart_write_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
+ struct tty_struct *tty = hu->tty;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+
+ /* REVISIT: should we cope with bad skbs or ->write() returning
+ * an error value?
+ */
+
restart:
clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
- return 0;
}
/* ------- Interface to HCI layer ------ */
hu->tty = tty;
tty->receive_room = 65536;
+ INIT_WORK(&hu->write_work, hci_uart_write_work);
+
spin_lock_init(&hu->rx_lock);
/* Flush any pending characters in the driver and line discipline. */
if (hdev)
hci_uart_close(hdev);
+ cancel_work_sync(&hu->write_work);
+
if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
if (hdev) {
hci_unregister_dev(hdev);
unsigned long flags;
unsigned long hdev_flags;
+ struct work_struct write_work;
+
struct hci_uart_proto *proto;
void *priv;
char *tmp; \
\
tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
- sprintf(tmp, format, param); \
- strcat(str, tmp); \
- kfree(tmp); \
+ if (likely(tmp)) { \
+ sprintf(tmp, format, param); \
+ strcat(str, tmp); \
+ kfree(tmp); \
+ } else { \
+ strcat(str, "kmalloc failure in SPRINTFCAT"); \
+ } \
}
static void report_jump_idx(u32 status, char *outstr)
static void mv_chan_activate(struct mv_xor_chan *chan)
{
- u32 activation;
-
dev_dbg(chan->device->common.dev, " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
- activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+
+ /* writel ensures all descriptors are flushed before activation */
+ writel(BIT(0), XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
- if (dmi_ident[s]
- && strstr(dmi_ident[s], dmi->matches[i].substr))
- continue;
+ if (dmi_ident[s]) {
+ if (!dmi->matches[i].exact_match &&
+ strstr(dmi_ident[s], dmi->matches[i].substr))
+ continue;
+ else if (dmi->matches[i].exact_match &&
+ !strcmp(dmi_ident[s], dmi->matches[i].substr))
+ continue;
+ }
+
/* No match */
return false;
}
retcode = -EFAULT;
goto err_i1;
}
- } else
+ } else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
+ }
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
- DRM_ERROR("No GTT space found for object %d\n",
+ DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_ERROR("reloc with multiple write domains: "
+ DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
return ret;
}
if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
- DRM_ERROR("reloc with read/write CPU domains: "
+ DRM_DEBUG("reloc with read/write CPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_ERROR("Write domain conflict: "
+ DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
- DRM_ERROR("Relocation beyond object bounds: "
+ DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
return ret;
}
if (unlikely(reloc->offset & 3)) {
- DRM_ERROR("Relocation not 4-byte aligned: "
+ DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
* relocations were valid.
*/
for (j = 0; j < exec[i].relocation_count; j++) {
- if (copy_to_user(&user_relocs[j].presumed_offset,
- &invalid_offset,
- sizeof(invalid_offset))) {
+ if (__copy_to_user(&user_relocs[j].presumed_offset,
+ &invalid_offset,
+ sizeof(invalid_offset))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
- DRM_ERROR("execbuf with invalid offset/length\n");
+ DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
+ DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
}
break;
default:
- DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
- DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
}
if (!list_empty(&obj->exec_list)) {
- DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
int ret, i;
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
+ DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
+ struct drm_i915_gem_exec_object __user *user_exec_list =
+ (void __user *)(uintptr_t)args->buffers_ptr;
+
/* Copy the new buffer offsets back to the user's exec list. */
- for (i = 0; i < args->buffer_count; i++)
- exec_list[i].offset = exec2_list[i].offset;
- /* ... and back out to userspace */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ break;
+ }
}
}
if (args->buffer_count < 1 ||
args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
+ DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
+ struct drm_i915_gem_exec_object2 __user *user_exec_list =
+ (void __user *)(uintptr_t)args->buffers_ptr;
+ int i;
+
+ for (i = 0; i < args->buffer_count; i++) {
+ ret = __copy_to_user(&user_exec_list[i].offset,
+ &exec2_list[i].offset,
+ sizeof(user_exec_list[i].offset));
+ if (ret) {
+ ret = -EFAULT;
+ DRM_DEBUG("failed to copy %d exec entries "
+ "back to user\n",
+ args->buffer_count);
+ break;
+ }
}
}
acpi_status status;
acpi_handle dhandle, rom_handle;
- if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
- return false;
-
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
+ break;
+ }
}
args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
- switch (bpc) {
- case 8:
- default:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
- break;
- case 10:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
- break;
- case 12:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
- break;
- case 16:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
- break;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
+ break;
+ case 10:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
+ break;
+ case 12:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
+ break;
+ case 16:
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
+ break;
+ }
}
args.v6.ucTransmitterID = encoder_id;
args.v6.ucEncoderMode = encoder_mode;
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
- } else
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+ } else {
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ }
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
}
}
+ if (!found) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (!ACPI_FAILURE(status)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
if (!found)
return false;
struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE5(rdev) &&
- (rdev->clock.dp_extclk >= 53900) &&
+ (rdev->clock.default_dispclk >= 53900) &&
radeon_connector_encoder_is_hbr2(connector)) {
return true;
}
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) > rdev->mc.visible_vram_size) {
- /* hurrah the memory is not visible ! */
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
- if (unlikely(r != 0))
- return r;
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should not happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
- }
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return 0;
+
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
+ if ((offset + size) <= rdev->mc.visible_vram_size)
+ return 0;
+
+ /* hurrah, the memory is not visible! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ if (unlikely(r == -ENOMEM)) {
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ return ttm_bo_validate(bo, &rbo->placement, false, true, false);
+ } else if (unlikely(r != 0)) {
+ return r;
}
+
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+
return 0;
}
return -EINVAL;
}
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ if (addr == rdev->dummy_page.addr)
+ addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ else
+ addr |= (R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
+ R600_PTE_READABLE | R600_PTE_WRITEABLE);
writeq(addr, ptr + (i * 8));
return 0;
}
} *cmd;
int ret;
struct vmw_resource *res;
+ SVGA3dCmdSurfaceDMASuffix *suffix;
+ uint32_t bo_size;
cmd = container_of(header, struct vmw_dma_cmd, header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ header->size - sizeof(*suffix));
+
+ /* Make sure the device and the verifier stay in sync. */
+ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+ DRM_ERROR("Invalid DMA suffix size.\n");
+ return -EINVAL;
+ }
+
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
+ /* Make sure DMA doesn't cross BO boundaries. */
+ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+ DRM_ERROR("Invalid DMA offset.\n");
+ return -EINVAL;
+ }
+
+ bo_size -= cmd->dma.guest.ptr.offset;
+ if (unlikely(suffix->maximumOffset > bo_size))
+ suffix->maximumOffset = bo_size;
+
bo = &vmw_bo->base;
ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
cmd->dma.host.sid, &srf);
* ->numbered being checked, which may not always be the case when
* drivers go to access report values.
*/
- report = hid->report_enum[type].report_id_hash[id];
+ if (id == 0) {
+ /*
+ * Validating on id 0 means we should examine the first
+ * report in the list.
+ */
+ report = list_entry(
+ hid->report_enum[type].report_list.next,
+ struct hid_report, list);
+ } else {
+ report = hid->report_enum[type].report_id_hash[id];
+ }
if (!report) {
hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
return NULL;
#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
+#define USB_VENDOR_ID_SYNAPTICS 0x06cb
+#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
+#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
+#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
+#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TOPSEED 0x0766
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
{ 0, 0 }
};
if (retval < 0)
goto fail;
- hyst = val - retval * 1000;
+ hyst = retval * 1000 - val;
hyst = DIV_ROUND_CLOSEST(hyst, 1000);
if (hyst < 0 || hyst > 255) {
retval = -ERANGE;
}
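	/*
	 * Worked example of the corrected math (illustrative numbers): with
	 * a limit of 70 degC (retval = 70) and val = 65000 millidegrees,
	 * hyst = 70 * 1000 - 65000 = 5000, which DIV_ROUND_CLOSEST() turns
	 * into 5.
	 */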
id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
- if (id != 0x01)
+ if (id < 0x01 || id > 0x04)
return -ENODEV;
return 0;
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
dw_writel(dev, ic_con, DW_IC_CON);
+ /* enforce disabled interrupts (due to HW issues) */
+ i2c_dw_disable_int(dev);
+
/* Enable the adapter */
dw_writel(dev, 1, DW_IC_ENABLE);
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
- i2c->suspended = 0;
clk_enable(i2c->clk);
s3c24xx_i2c_init(i2c);
clk_disable(i2c->clk);
+ i2c->suspended = 0;
return 0;
}
{
struct ib_umad_port *port;
struct ib_umad_file *file;
- int ret;
+ int ret = -ENXIO;
port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
- if (port)
- kref_get(&port->umad_dev->ref);
- else
- return -ENXIO;
mutex_lock(&port->file_mutex);
- if (!port->ib_dev) {
- ret = -ENXIO;
+ if (!port->ib_dev)
goto out;
- }
+ ret = -ENOMEM;
file = kzalloc(sizeof *file, GFP_KERNEL);
- if (!file) {
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
- ret = -ENOMEM;
+ if (!file)
goto out;
- }
mutex_init(&file->mutex);
spin_lock_init(&file->send_lock);
list_add_tail(&file->port_list, &port->file_list);
ret = nonseekable_open(inode, filp);
+ if (ret) {
+ list_del(&file->port_list);
+ kfree(file);
+ goto out;
+ }
+
+ kref_get(&port->umad_dev->ref);
out:
mutex_unlock(&port->file_mutex);
int ret;
port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
- if (port)
- kref_get(&port->umad_dev->ref);
- else
- return -ENXIO;
if (filp->f_flags & O_NONBLOCK) {
if (down_trylock(&port->sm_sem)) {
}
ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
- if (ret) {
- up(&port->sm_sem);
- goto fail;
- }
+ if (ret)
+ goto err_up_sem;
filp->private_data = port;
- return nonseekable_open(inode, filp);
+ ret = nonseekable_open(inode, filp);
+ if (ret)
+ goto err_clr_sm_cap;
+
+ kref_get(&port->umad_dev->ref);
+
+ return 0;
+
+err_clr_sm_cap:
+ swap(props.set_port_cap_mask, props.clr_port_cap_mask);
+ ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+
+err_up_sem:
+ up(&port->sm_sem);
fail:
- kref_put(&port->umad_dev->ref, ib_umad_release_dev);
return ret;
}
uresp.gts_key = ucontext->key;
ucontext->key += PAGE_SIZE;
spin_unlock(&ucontext->mmap_lock);
- ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+ ret = ib_copy_to_udata(udata, &uresp,
+ sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
goto err5;
__u32 cqid;
__u32 size;
__u32 qid_mask;
+ __u32 reserved; /* explicit padding (optional for i386) */
};
ret = -EFAULT;
goto bail;
}
+ dp.len = odp.len;
+ dp.unit = odp.unit;
+ dp.data = odp.data;
+ dp.pbc_wd = 0;
} else {
ret = -EINVAL;
goto bail;
event.event = IB_EVENT_PKEY_CHANGE;
event.device = &dd->verbs_dev.ibdev;
- event.element.port_num = 1;
+ event.element.port_num = port;
ib_dispatch_event(&event);
}
return 0;
err_iu:
srp_put_tx_iu(target, iu, SRP_IU_CMD);
+ /*
+ * Prevent the loops that iterate over the request ring from
+ * encountering a dangling SCSI command pointer.
+ */
+ req->scmnd = NULL;
+
spin_lock_irqsave(&target->lock, flags);
list_add(&req->list, &target->free_reqs);
*/
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+
+ /* For clickpads map both buttons to BTN_LEFT */
+ if (etd->fw_version & 0x001000) {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ }
+
input_report_abs(dev, ABS_PRESSURE, pres);
input_report_abs(dev, ABS_TOOL_WIDTH, width);
static void elantech_input_sync_v4(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
+ struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
- input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
- input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ /* For clickpads map both buttons to BTN_LEFT */
+ if (etd->fw_version & 0x001000) {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x03);
+ } else {
+ input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
+ input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ }
+
input_mt_report_pointer_emulation(dev, true);
input_sync(dev);
}
break;
case 3:
- etd->reg_10 = 0x0b;
+ if (etd->set_hw_resolution)
+ etd->reg_10 = 0x0b;
+ else
+ etd->reg_10 = 0x01;
+
if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
rc = -1;
return 0;
}
+/*
+ * Some hw_version 3 models go into an error state when we try to set
+ * bit 3 and/or bit 1 of reg_10.
+ */
+static const struct dmi_system_id no_hw_res_dmi_table[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Gigabyte U2442 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
+ },
+ },
+#endif
+ { }
+};
+
/*
* determine hardware version and set some properties according to it.
*/
etd->reports_pressure = true;
}
+ /* Enable real hardware resolution on hw_version 3? */
+ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
+
return 0;
}
bool paritycheck;
bool jumpy_cursor;
bool reports_pressure;
+ bool set_hw_resolution;
unsigned char hw_version;
unsigned int fw_version;
unsigned int single_finger_reports;
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
- if (quirk_min_max) {
- priv->x_min = quirk_min_max[0];
- priv->x_max = quirk_min_max[1];
- priv->y_min = quirk_min_max[2];
- priv->y_max = quirk_min_max[3];
- return 0;
- }
-
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
}
}
+ if (quirk_min_max) {
+ priv->x_min = quirk_min_max[0];
+ priv->x_max = quirk_min_max[1];
+ priv->y_min = quirk_min_max[2];
+ priv->y_max = quirk_min_max[3];
+ return 0;
+ }
+
if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 5 &&
SYN_CAP_MAX_DIMENSIONS(priv->ext_cap_0c)) {
if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MAX_COORDS, resp)) {
},
.driver_data = (int []){1232, 5710, 1156, 4696},
},
+ {
+ /* Lenovo ThinkPad Edge E431 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
+ },
+ .driver_data = (int []){1024, 5022, 2508, 4832},
+ },
+ {
+ /* Lenovo ThinkPad T431s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
{
/* Lenovo ThinkPad T440s */
.matches = {
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
+ {
+ /* Lenovo ThinkPad L440 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
{
/* Lenovo ThinkPad T540p */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
},
- .driver_data = (int []){1024, 5056, 2058, 4832},
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad L540 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo ThinkPad W540 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W540"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo Yoga S1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "ThinkPad S1 Yoga"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION,
+ "ThinkPad X1 Carbon 2nd"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
},
#endif
{ }
{
struct dmar_domain *dmar_domain = domain->priv;
size_t size = PAGE_SIZE << gfp_order;
- int order;
+ int order, iommu_id;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
(iova + size - 1) >> VTD_PAGE_SHIFT);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
+ for_each_set_bit(iommu_id, &dmar_domain->iommu_bmp, g_num_of_iommus) {
+ struct intel_iommu *iommu = g_iommus[iommu_id];
+ int num, ndomains;
+
+ /*
+ * find bit position of dmar_domain
+ */
+ ndomains = cap_ndoms(iommu->cap);
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
+ if (iommu->domains[num] == dmar_domain)
+ iommu_flush_iotlb_psi(iommu, num,
+ iova >> VTD_PAGE_SHIFT,
+ 1 << order, 0);
+ }
+ }
+
return order;
}
/* just in case thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
- if (mddev->ro) /* never try to sync a read-only array */
+ if (mddev->ro) { /* never try to sync a read-only array */
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
return;
+ }
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
- mddev->safemode = 2;
+ if (mddev->persistent)
+ mddev->safemode = 2;
mddev_unlock(mddev);
}
need_delay = 1;
struct media_entity *ent;
struct media_entity_desc u_ent;
+ memset(&u_ent, 0, sizeof(u_ent));
if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
return -EFAULT;
* windows that fall outside that.
*/
for (i = 0; i < N_WIN_SIZES; i++) {
- struct ov7670_win_size *win = &ov7670_win_sizes[index];
+ struct ov7670_win_size *win = &ov7670_win_sizes[i];
if (info->min_width && win->width < info->min_width)
continue;
if (info->min_height && win->height < info->min_height)
static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
+ if (get_user(kp->type, &up->type))
+ return -EFAULT;
+
switch (kp->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
- get_user(kp->type, &up->type))
- return -EFAULT;
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+ return -EFAULT;
return __get_v4l2_format32(kp, up);
}
static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
{
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
- return -EFAULT;
+ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
return __get_v4l2_format32(&kp->format, &up->format);
}
out:
return res;
err:
+ bond_destroy_debugfs();
rtnl_link_unregister(&bond_link_ops);
err_link:
unregister_pernet_subsys(&bond_net_ops);
#define DRV_NAME "peak_pci"
struct peak_pci_chan {
- void __iomem *cfg_base; /* Common for all channels */
- struct net_device *next_dev; /* Chain of network devices */
- u16 icr_mask; /* Interrupt mask for fast ack */
+ void __iomem *cfg_base; /* Common for all channels */
+ struct net_device *prev_dev; /* Chain of network devices */
+ u16 icr_mask; /* Interrupt mask for fast ack */
};
#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev, *dev0 = NULL;
+ struct net_device *dev, *prev_dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
}
/* Create chain of SJA1000 devices */
- if (i == 0)
- dev0 = dev;
- else
- chan->next_dev = dev;
+ chan->prev_dev = pci_get_drvdata(pdev);
+ pci_set_drvdata(pdev, dev);
dev_info(&pdev->dev,
"%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
dev->name, priv->reg_base, chan->cfg_base, dev->irq);
}
- pci_set_drvdata(pdev, dev0);
-
/* Enable interrupts */
writew(icr, cfg_base + PITA_ICR + 2);
/* Disable interrupts */
writew(0x0, cfg_base + PITA_ICR + 2);
- for (dev = dev0; dev; dev = chan->next_dev) {
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
+ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
priv = netdev_priv(dev);
chan = priv->priv;
- dev = chan->next_dev;
+ prev_dev = chan->prev_dev;
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
}
pci_iounmap(pdev, reg_base);
static void __devexit peak_pci_remove(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev); /* First device */
+ struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
struct sja1000_priv *priv = netdev_priv(dev);
struct peak_pci_chan *chan = priv->priv;
void __iomem *cfg_base = chan->cfg_base;
/* Loop over all registered devices */
while (1) {
+ struct net_device *prev_dev = chan->prev_dev;
+
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->next_dev;
+ dev = prev_dev;
if (!dev)
break;
priv = netdev_priv(dev);
if (tg3_flag(tp, MAX_RXPEND_64) &&
tp->rx_pending > 63)
tp->rx_pending = 63;
- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
cq->ring = ring;
cq->is_tx = mode;
- spin_lock_init(&cq->lock);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size, 2 * PAGE_SIZE);
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *cq;
- unsigned long flags;
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
cq = &priv->rx_cq[i];
- spin_lock_irqsave(&cq->lock, flags);
- napi_synchronize(&cq->napi);
- mlx4_en_process_rx_cq(dev, cq, 0);
- spin_unlock_irqrestore(&cq->lock, flags);
+ napi_schedule(&cq->napi);
}
}
#endif
kfree(priv->steer);
}
-static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
{
struct mlx4_priv *priv;
struct mlx4_dev *dev;
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof *priv, GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
- err = -ENOMEM;
- goto err_release_regions;
- }
-
- dev = &priv->dev;
+ dev = pci_get_drvdata(pdev);
+ priv = mlx4_priv(dev);
dev->pdev = pdev;
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mlx4_sense_init(dev);
mlx4_start_sense(dev);
- pci_set_drvdata(pdev, dev);
+ priv->removed = 0;
return 0;
static int __devinit mlx4_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+
printk_once(KERN_INFO "%s", mlx4_version);
- return __mlx4_init_one(pdev, id);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev = &priv->dev;
+ pci_set_drvdata(pdev, dev);
+ priv->pci_dev_data = id->driver_data;
+
+ return __mlx4_init_one(pdev, id->driver_data);
}
-static void mlx4_remove_one(struct pci_dev *pdev)
+static void __mlx4_remove_one(struct pci_dev *pdev)
{
struct mlx4_dev *dev = pci_get_drvdata(pdev);
struct mlx4_priv *priv = mlx4_priv(dev);
+ int pci_dev_data;
int p;
- if (dev) {
- mlx4_stop_sense(dev);
- mlx4_unregister_device(dev);
+ if (priv->removed)
+ return;
- for (p = 1; p <= dev->caps.num_ports; p++) {
- mlx4_cleanup_port_info(&priv->port[p]);
- mlx4_CLOSE_PORT(dev, p);
- }
+ pci_dev_data = priv->pci_dev_data;
- mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
- mlx4_cleanup_qp_table(dev);
- mlx4_cleanup_srq_table(dev);
- mlx4_cleanup_cq_table(dev);
- mlx4_cmd_use_polling(dev);
- mlx4_cleanup_eq_table(dev);
- mlx4_cleanup_mr_table(dev);
- mlx4_cleanup_xrcd_table(dev);
- mlx4_cleanup_pd_table(dev);
-
- iounmap(priv->kar);
- mlx4_uar_free(dev, &priv->driver_uar);
- mlx4_cleanup_uar_table(dev);
- mlx4_clear_steering(dev);
- mlx4_free_eq_table(dev);
- mlx4_close_hca(dev);
- mlx4_cmd_cleanup(dev);
-
- if (dev->flags & MLX4_FLAG_MSI_X)
- pci_disable_msix(pdev);
-
- kfree(priv);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ mlx4_stop_sense(dev);
+ mlx4_unregister_device(dev);
+
+ for (p = 1; p <= dev->caps.num_ports; p++) {
+ mlx4_cleanup_port_info(&priv->port[p]);
+ mlx4_CLOSE_PORT(dev, p);
}
+
+ mlx4_cleanup_counters_table(dev);
+ mlx4_cleanup_mcg_table(dev);
+ mlx4_cleanup_qp_table(dev);
+ mlx4_cleanup_srq_table(dev);
+ mlx4_cleanup_cq_table(dev);
+ mlx4_cmd_use_polling(dev);
+ mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mr_table(dev);
+ mlx4_cleanup_xrcd_table(dev);
+ mlx4_cleanup_pd_table(dev);
+
+ iounmap(priv->kar);
+ mlx4_uar_free(dev, &priv->driver_uar);
+ mlx4_cleanup_uar_table(dev);
+ mlx4_clear_steering(dev);
+ mlx4_free_eq_table(dev);
+ mlx4_close_hca(dev);
+ mlx4_cmd_cleanup(dev);
+
+ if (dev->flags & MLX4_FLAG_MSI_X)
+ pci_disable_msix(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ memset(priv, 0, sizeof(*priv));
+ priv->pci_dev_data = pci_dev_data;
+ priv->removed = 1;
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ __mlx4_remove_one(pdev);
+ kfree(priv);
+ pci_set_drvdata(pdev, NULL);
}
int mlx4_restart_one(struct pci_dev *pdev)
{
- mlx4_remove_one(pdev);
- return __mlx4_init_one(pdev, NULL);
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int pci_dev_data;
+
+ pci_dev_data = priv->pci_dev_data;
+ __mlx4_remove_one(pdev);
+ return __mlx4_init_one(pdev, pci_dev_data);
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ int pci_dev_data;
+ int removed;
+
struct list_head pgdir_list;
struct mutex pgdir_mutex;
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
/* Per-core Tx cq processing support */
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
- __u8 ip_summed = skb->ip_summed;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
}
xmit_world:
- skb->ip_summed = ip_summed;
skb->dev = vlan->lowerdev;
return dev_queue_xmit(skb);
}
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ }
}
static void macvlan_set_multicast_list(struct net_device *dev)
d_printf(1, dev, "RX: size changed to %d, received %d, "
"copied %d, capacity %ld\n",
rx_size, read_size, rx_skb->len,
- (long) (skb_end_pointer(new_skb) - new_skb->head));
+ (long) skb_end_offset(new_skb));
goto retry;
}
/* In most cases, it happens due to the hardware scheduling a
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
- /* chanid is the radio channel cookie value as used
- * to tune the radio. */
- status.freq = chanid + 2400;
+ /* Somewhere between firmware 478.104 and 508.1084, the G-PHY
+ * firmware was modified to be compatible with N-PHY and others.
+ */
+ if (dev->fw.rev >= 508)
+ status.freq = ieee80211_channel_to_frequency(chanid, status.band);
+ else
+ status.freq = chanid + 2400;
break;
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
crypto.cipher = rt2x00crypto_key_to_cipher(key);
if (crypto.cipher == CIPHER_NONE)
return -EOPNOTSUPP;
+ if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
+ return -EOPNOTSUPP;
crypto.cmd = cmd;
rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
bss_conf->bssid);
- /*
- * Update the beacon. This is only required on USB devices. PCI
- * devices fetch beacons periodically.
- */
- if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
- rt2x00queue_update_beacon(rt2x00dev, vif);
-
/*
* Start/stop beaconing.
*/
if (changes & BSS_CHANGED_BEACON_ENABLED) {
if (!bss_conf->enable_beacon && intf->enable_beacon) {
- rt2x00queue_clear_beacon(rt2x00dev, vif);
rt2x00dev->intf_beaconing--;
intf->enable_beacon = false;
+ /*
+ * Clear beacon in the H/W for this vif. This is needed
+ * to disable beaconing on this particular interface
+ * and keep it running on other interfaces.
+ */
+ rt2x00queue_clear_beacon(rt2x00dev, vif);
if (rt2x00dev->intf_beaconing == 0) {
/*
rt2x00queue_stop_queue(rt2x00dev->bcn);
mutex_unlock(&intf->beacon_skb_mutex);
}
-
-
} else if (bss_conf->enable_beacon && !intf->enable_beacon) {
rt2x00dev->intf_beaconing++;
intf->enable_beacon = true;
+ /*
+ * Upload beacon to the H/W. This is only required on
+ * USB devices. PCI devices fetch beacons periodically.
+ */
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2x00queue_update_beacon(rt2x00dev, vif);
if (rt2x00dev->intf_beaconing == 1) {
/*
if (rtlhal->interface == INTF_PCI) {
rcu_read_lock();
sta = ieee80211_find_sta(mac->vif, mac->bssid);
+ if (!sta)
+ goto out_unlock;
}
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
p_ra->ratr_state);
p_ra->pre_ratr_state = p_ra->ratr_state;
+ out_unlock:
if (rtlhal->interface == INTF_PCI)
rcu_read_unlock();
}
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
int err = 0;
static bool iqk_initialized;
+ unsigned long flags;
+
+ /* As this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe to do so, since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
err = _rtl92cu_init_mac(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
- return err;
+ goto exit;
}
err = rtl92c_download_fw(hw);
if (err) {
("Failed to download FW. Init HW without FW now..\n"));
err = 1;
rtlhal->fw_ready = false;
- return err;
+ goto exit;
} else {
rtlhal->fw_ready = true;
}
_update_mac_setting(hw);
rtl92c_dm_init(hw);
_dump_registers(hw);
+exit:
+ local_irq_restore(flags);
return err;
}
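
A minimal sketch of the interrupt handling the hunk above adds, assuming a hypothetical foo_hw_init() in place of the rtl8192cu init path:

static int foo_hw_init(struct foo_priv *priv)
{
	unsigned long flags;
	int err;

	local_save_flags(flags);	/* remember the caller's irq state */
	local_irq_enable();		/* long init: keep other irqs serviced */

	err = foo_long_init(priv);	/* hypothetical, may take ~350 ms */

	local_irq_restore(flags);	/* hand back the state we were given */
	return err;
}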
return WRONG_BUS_FREQUENCY;
}
- bsp = ctrl->pci_dev->bus->cur_bus_speed;
- msp = ctrl->pci_dev->bus->max_bus_speed;
+ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+ msp = ctrl->pci_dev->subordinate->max_bus_speed;
/* Check if there are other slots or devices on the same bus */
if (!list_empty(&ctrl->pci_dev->subordinate->devices))
/* Do not issue duplicate brightness change events to
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
- if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
+ if (acpi_video_backlight_support()) {
pr_info("This ThinkPad has standard ACPI backlight "
"brightness control, supported by the ACPI "
"video driver\n");
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
static DECLARE_COMPLETION(at91_rtc_updated);
+static DECLARE_COMPLETION(at91_rtc_upd_rdy);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
/*
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
+ wait_for_completion(&at91_rtc_upd_rdy);
+
/* Stop Time/Calendar from counting */
cr = at91_sys_read(AT91_RTC_CR);
at91_sys_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
/* Restart Time/Calendar */
cr = at91_sys_read(AT91_RTC_CR);
+ at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
at91_sys_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
+ at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
return 0;
}
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
- if (rtsr & AT91_RTC_SECEV)
- events |= (RTC_UF | RTC_IRQF);
+ if (rtsr & AT91_RTC_SECEV) {
+ complete(&at91_rtc_upd_rdy);
+ at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
+ }
if (rtsr & AT91_RTC_ACKUPD)
complete(&at91_rtc_updated);
}
platform_set_drvdata(pdev, rtc);
+ /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
+ * completion.
+ */
+ at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
+
printk(KERN_INFO "AT91 Real Time Clock driver.\n");
return 0;
}
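
The change above throttles time updates with a one-shot completion: the 1 Hz second event (SECEV) interrupt signals that the RTC is ready for an update, and the set-time path re-arms the interrupt once it is done. A minimal sketch of the handshake, with event_pending()/mask_event()/unmask_event() standing in for the AT91 register accesses:

static DECLARE_COMPLETION(upd_rdy);	/* signalled by the 1 Hz event */

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	if (event_pending()) {		/* hypothetical status check */
		complete(&upd_rdy);	/* updater may proceed */
		mask_event();		/* one-shot: masked until re-armed */
	}
	return IRQ_HANDLED;
}

static int foo_settime(void)
{
	wait_for_completion(&upd_rdy);	/* block until hardware is ready */
	/* ... stop the counters, write the new time, restart ... */
	unmask_event();			/* re-arm for the next update */
	return 0;
}

As in the probe hunk above, the event interrupt also has to be enabled once at init time so the first wait_for_completion() can ever complete.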
u32 *reply_queue;
dma_addr_t reply_queue_h;
- unsigned long base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
u32 max_sectors_1;
u32 max_sectors_2;
u32 tmp_sectors, msix_enable;
+ resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info;
unsigned long bar_list;
/* Find first memory bar */
bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
- instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
if (pci_request_selected_regions(instance->pdev, instance->bar,
"megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
return -EBUSY;
}
- instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
+ base_addr = pci_resource_start(instance->pdev, instance->bar);
+ instance->reg_set = ioremap_nocache(base_addr, 8192);
if (!instance->reg_set) {
printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
mpt2sas_base_free_resources(ioc);
pci_save_state(pdev);
- pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
return 0;
}
SCSI_SENSE_VALID(scmd))
continue;
+ if (status_byte(scmd->result) != CHECK_CONDITION)
+ /*
+ * don't request sense if there's no check condition
+ * status because the error we're processing isn't one
+ * that has a sense code (and some devices get
+ * confused by sense requests out of the blue)
+ */
+ continue;
+
SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
"%s: requesting sense\n",
current->comm));
/*
* Requeue this command. It will go before all other commands
- * that are already in the queue.
+ * that are already in the queue. Schedule requeue work under
+ * lock such that the kblockd_schedule_work() call happens
+ * before blk_cleanup_queue() finishes.
*/
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
kblockd_schedule_work(q, &device->requeue_work);
+ spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
}
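
The two-line movement above closes a narrow race: if queue_lock is dropped before kblockd_schedule_work(), blk_cleanup_queue() can run to completion in that window and the work item then operates on a dead queue. The resulting ordering, condensed:

spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
/* schedule while still holding the lock, so blk_cleanup_queue()
 * cannot finish between the requeue and the work submission */
kblockd_schedule_work(q, &device->requeue_work);
spin_unlock_irqrestore(q->queue_lock, flags);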
struct Scsi_Host *shost = dev_to_shost(dev->parent);
unsigned long flags;
+ starget->state = STARGET_DEL;
transport_destroy_device(dev);
spin_lock_irqsave(shost->host_lock, flags);
if (shost->hostt->target_destroy)
return found_starget;
}
+/**
+ * scsi_target_reap_ref_release - remove target from visibility
+ * @kref: the reap_ref in the target being released
+ *
+ * Called on last put of reap_ref, which is the indication that no device
+ * under this target is visible anymore, so render the target invisible in
+ * sysfs. Note: we have to be in user context here because the target reaps
+ * should be done in places where the scsi device visibility is being removed.
+ */
+static void scsi_target_reap_ref_release(struct kref *kref)
+{
+ struct scsi_target *starget
+ = container_of(kref, struct scsi_target, reap_ref);
+
+ /*
+ * if we get here and the target is still in the CREATED state that
+ * means it was allocated but never made visible (because a scan
+ * turned up no LUNs), so don't call device_del() on it.
+ */
+ if (starget->state != STARGET_CREATED) {
+ transport_remove_device(&starget->dev);
+ device_del(&starget->dev);
+ }
+ scsi_target_destroy(starget);
+}
+
+static void scsi_target_reap_ref_put(struct scsi_target *starget)
+{
+ kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
+}
+
/**
* scsi_alloc_target - allocate a new or find an existing target
* @parent: parent of the target (need not be a scsi host)
+ shost->transportt->target_size;
struct scsi_target *starget;
struct scsi_target *found_target;
- int error;
+ int error, ref_got;
starget = kzalloc(size, GFP_KERNEL);
if (!starget) {
}
dev = &starget->dev;
device_initialize(dev);
- starget->reap_ref = 1;
+ kref_init(&starget->reap_ref);
dev->parent = get_device(parent);
dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
dev->bus = &scsi_bus_type;
return starget;
found:
- found_target->reap_ref++;
+ /*
+ * release routine already fired if kref is zero, so if we can still
+ * take the reference, the target must be alive. If we can't, it must
+ * be dying and we need to wait for a new target
+ */
+ ref_got = kref_get_unless_zero(&found_target->reap_ref);
+
spin_unlock_irqrestore(shost->host_lock, flags);
- if (found_target->state != STARGET_DEL) {
+ if (ref_got) {
put_device(dev);
return found_target;
}
- /* Unfortunately, we found a dying target; need to
- * wait until it's dead before we can get a new one */
+ /*
+ * Unfortunately, we found a dying target; need to wait until it's
+ * dead before we can get a new one. There is an anomaly here. We
+ * *should* call scsi_target_reap() to balance the kref_get() of the
+ * reap_ref above. However, since the target is being released, it's
+ * already invisible and the reap_ref is irrelevant. If we call
+ * scsi_target_reap() we might spuriously do another device_del() on
+ * an already invisible target.
+ */
put_device(&found_target->dev);
- flush_scheduled_work();
+ /*
+ * The length of time is irrelevant here; we just want to yield the
+ * CPU for a tick to avoid busy-waiting for the target to die.
+ */
+ msleep(1);
goto retry;
}
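
The conversion above is the standard lookup-under-lock idiom for kref-counted objects: kref_get_unless_zero() fails exactly when the release routine has already fired, so a failed get means the object is dying and must not be reused. A minimal sketch, assuming a hypothetical find_obj() list search:

	struct foo *obj;
	unsigned long flags;
	int got;

retry:
	spin_lock_irqsave(&table_lock, flags);
	obj = find_obj(key);			/* hypothetical list search */
	got = obj && kref_get_unless_zero(&obj->kref);
	spin_unlock_irqrestore(&table_lock, flags);

	if (obj && !got) {
		/* found a dying object: yield briefly, then look again */
		msleep(1);
		goto retry;
	}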
-static void scsi_target_reap_usercontext(struct work_struct *work)
-{
- struct scsi_target *starget =
- container_of(work, struct scsi_target, ew.work);
-
- transport_remove_device(&starget->dev);
- device_del(&starget->dev);
- scsi_target_destroy(starget);
-}
-
/**
* scsi_target_reap - check to see if target is in use and destroy if not
* @starget: target to be checked
*/
void scsi_target_reap(struct scsi_target *starget)
{
- struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- unsigned long flags;
- enum scsi_target_state state;
- int empty = 0;
-
- spin_lock_irqsave(shost->host_lock, flags);
- state = starget->state;
- if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
- empty = 1;
- starget->state = STARGET_DEL;
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
-
- if (!empty)
- return;
-
- BUG_ON(state == STARGET_DEL);
- if (state == STARGET_CREATED)
- scsi_target_destroy(starget);
- else
- execute_in_process_context(scsi_target_reap_usercontext,
- &starget->ew);
+ /*
+ * serious problem if this triggers: STARGET_DEL is only set if
+ * the reap_ref drops to zero, so we're trying to do another final put
+ * on an already released kref
+ */
+ BUG_ON(starget->state == STARGET_DEL);
+ scsi_target_reap_ref_put(starget);
}
/**
}
mutex_unlock(&shost->scan_mutex);
scsi_autopm_put_target(starget);
+ /*
+ * paired with scsi_alloc_target(). Target will be destroyed unless
+ * scsi_probe_and_add_lun made an underlying device visible
+ */
scsi_target_reap(starget);
put_device(&starget->dev);
out_reap:
scsi_autopm_put_target(starget);
- /* now determine if the target has any children at all
- * and if not, nuke it */
+ /*
+ * paired with scsi_alloc_target(): determine if the target has
+ * any children at all and if not, nuke it
+ */
scsi_target_reap(starget);
put_device(&starget->dev);
{
struct scsi_device *sdev;
struct device *parent;
- struct scsi_target *starget;
struct list_head *this, *tmp;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
parent = sdev->sdev_gendev.parent;
- starget = to_scsi_target(parent);
spin_lock_irqsave(sdev->host->host_lock, flags);
- starget->reap_ref++;
list_del(&sdev->siblings);
list_del(&sdev->same_target_siblings);
list_del(&sdev->starved_entry);
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;
- scsi_target_reap(scsi_target(sdev));
-
kfree(sdev->inquiry);
kfree(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
+
+ /*
+ * Stop accepting new requests and wait until all queuecommand() and
+ * scsi_run_queue() invocations have finished before tearing down the
+ * device.
+ */
scsi_device_set_state(sdev, SDEV_DEL);
+ blk_cleanup_queue(sdev->request_queue);
+ cancel_work_sync(&sdev->requeue_work);
+
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
- /* Freeing the queue signals to block that we're done */
- blk_cleanup_queue(sdev->request_queue);
+ /*
+ * Paired with the kref_get() in scsi_sysfs_initialize(). We have
+ * removed sysfs visibility from the device, so make the target
+ * invisible if this was the last device underneath it.
+ */
+ scsi_target_reap(scsi_target(sdev));
+
put_device(dev);
}
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
/* assuming new targets arrive at the end */
- starget->reap_ref++;
+ kref_get(&starget->reap_ref);
spin_unlock_irqrestore(shost->host_lock, flags);
if (last)
scsi_target_reap(last);
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * device can now only be removed via __scsi_remove_device() so hold
+ * the target. Target will be held in CREATED state until something
+ * beneath it becomes visible (in which case it moves to RUNNING)
+ */
+ kref_get(&starget->reap_ref);
}
int scsi_is_sdev_device(const struct device *dev)
}
if (unlikely
(skb->truesize !=
- sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
+ sizeof(*skb) + skb_end_offset(skb))) {
/*
printk("TX buffer truesize has been changed\n");
*/
struct sta_info *psta;
struct sta_priv *pstapriv;
union recv_frame *prtnframe;
- u16 ether_type = 0;
+ u16 ether_type;
pstapriv = &adapter->stapriv;
ptr = get_recvframe_data(precv_frame);
psta = r8712_get_stainfo(pstapriv, psta_addr);
auth_alg = adapter->securitypriv.AuthAlgrthm;
if (auth_alg == 2) {
+ /* get ether_type */
+ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
+ memcpy(ðer_type, ptr, 2);
+ ether_type = ntohs((unsigned short)ether_type);
+
if ((psta != NULL) && (psta->ieee8021x_blocked)) {
/* blocked
* only accept EAPOL frame */
- prtnframe = precv_frame;
- /*get ether_type */
- ptr = ptr + pfhdr->attrib.hdrlen +
- pfhdr->attrib.iv_len + LLC_HEADER_SIZE;
- memcpy(ðer_type, ptr, 2);
- ether_type = ntohs((unsigned short)ether_type);
if (ether_type == 0x888e)
prtnframe = precv_frame;
else {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
+ /*
+ * During mutual authentication, the CHAP_C generated by the
+ * initiator must not match the original CHAP_C generated by
+ * the target.
+ */
+ if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ pr_err("initiator CHAP_C matches target CHAP_C, failing"
+ " login attempt\n");
+ goto out;
+ }
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
spin_unlock(&dev->se_port_lock);
se_dev_stop(dev);
+ lun->lun_sep = NULL;
lun->lun_se_dev = NULL;
}
- 1;
for (j = 0; j < sg_per_table; j++) {
- pg = alloc_pages(GFP_KERNEL, 0);
+ pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
if (!pg) {
pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return hvc_driver;
}
-static int __init hvc_console_setup(struct console *co, char *options)
+static int hvc_console_setup(struct console *co, char *options)
{
if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
return -ENODEV;
tty->ops->flush_chars(tty);
} else {
while (nr > 0) {
+ mutex_lock(&tty->output_lock);
c = tty->ops->write(tty, b, nr);
+ mutex_unlock(&tty->output_lock);
if (c < 0) {
retval = c;
goto break_out;
static int acm_ctrl_msg(struct acm *acm, int request, int value,
void *buf, int len)
{
- int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
+ int retval;
+
+ retval = usb_autopm_get_interface(acm->control);
+ if (retval)
+ return retval;
+
+ retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
request, USB_RT_ACM, value,
acm->control->altsetting[0].desc.bInterfaceNumber,
buf, len, 5000);
+
dev_dbg(&acm->control->dev,
"%s - rq 0x%02x, val %#x, len %#x, result %d\n",
__func__, request, value, len, retval);
+
+ usb_autopm_put_interface(acm->control);
+
return retval < 0 ? retval : 0;
}
dev_vdbg(&acm->data->dev, "%s - susp_count %d\n", __func__,
acm->susp_count);
- usb_autopm_get_interface_async(acm->control);
+ rc = usb_autopm_get_interface_async(acm->control);
+ if (rc) {
+ wb->use = 0;
+ spin_unlock_irqrestore(&acm->write_lock, flags);
+ return rc;
+ }
+
if (acm->susp_count) {
- if (!acm->delayed_wb)
- acm->delayed_wb = wb;
- else
- usb_autopm_put_interface_async(acm->control);
+ usb_anchor_urb(wb->urb, &acm->delayed);
spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0; /* A white lie */
+ return 0;
}
usb_mark_last_busy(acm->dev);
static void acm_port_down(struct acm *acm)
{
+ struct urb *urb;
+ struct acm_wb *wb;
int i;
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
+
+ for (;;) {
+ urb = usb_get_from_anchor(&acm->delayed);
+ if (!urb)
+ break;
+ wb = urb->context;
+ wb->use = 0;
+ usb_autopm_put_interface_async(acm->control);
+ }
+
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
acm->bInterval = epread->bInterval;
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
+ init_usb_anchor(&acm->delayed);
buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
if (!buf) {
struct acm *acm = usb_get_intfdata(intf);
int cnt;
+ spin_lock_irq(&acm->read_lock);
+ spin_lock(&acm->write_lock);
if (PMSG_IS_AUTO(message)) {
- int b;
-
- spin_lock_irq(&acm->write_lock);
- b = acm->transmitting;
- spin_unlock_irq(&acm->write_lock);
- if (b)
+ if (acm->transmitting) {
+ spin_unlock(&acm->write_lock);
+ spin_unlock_irq(&acm->read_lock);
return -EBUSY;
+ }
}
-
- spin_lock_irq(&acm->read_lock);
- spin_lock(&acm->write_lock);
cnt = acm->susp_count++;
spin_unlock(&acm->write_lock);
spin_unlock_irq(&acm->read_lock);
static int acm_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
- struct acm_wb *wb;
+ struct urb *urb;
int rv = 0;
- int cnt;
+ mutex_lock(&acm->mutex);
spin_lock_irq(&acm->read_lock);
- acm->susp_count -= 1;
- cnt = acm->susp_count;
- spin_unlock_irq(&acm->read_lock);
+ spin_lock(&acm->write_lock);
- if (cnt)
- return 0;
+ if (--acm->susp_count)
+ goto out;
- mutex_lock(&acm->mutex);
if (acm->port.count) {
- rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
-
- spin_lock_irq(&acm->write_lock);
- if (acm->delayed_wb) {
- wb = acm->delayed_wb;
- acm->delayed_wb = NULL;
- spin_unlock_irq(&acm->write_lock);
- acm_start_wb(acm, wb);
- } else {
- spin_unlock_irq(&acm->write_lock);
+ rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
+
+ for (;;) {
+ urb = usb_get_from_anchor(&acm->delayed);
+ if (!urb)
+ break;
+
+ acm_start_wb(acm, urb->context);
}
/*
* do the write path at all cost
*/
if (rv < 0)
- goto err_out;
+ goto out;
- rv = acm_submit_read_urbs(acm, GFP_NOIO);
+ rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
}
-
-err_out:
+out:
+ spin_unlock(&acm->write_lock);
+ spin_unlock_irq(&acm->read_lock);
mutex_unlock(&acm->mutex);
+
return rv;
}
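
Taken together, the cdc-acm hunks above replace the single delayed_wb pointer with a usb_anchor, which can hold any number of writes issued while the device is suspended. The life cycle, condensed (locking and error handling elided):

init_usb_anchor(&acm->delayed);			/* at probe time */

/* write path, under acm->write_lock: */
if (acm->susp_count) {
	usb_anchor_urb(wb->urb, &acm->delayed);	/* park until resume */
	return 0;
}

/* resume path, once susp_count drops to zero: */
while ((urb = usb_get_from_anchor(&acm->delayed)))
	acm_start_wb(acm, urb->context);	/* submit parked writes */

Port shutdown drains the same anchor and drops the autopm reference taken for each parked write, as the acm_port_down() hunk does.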
},
/* Motorola H24 HSPA module: */
{ USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
- { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
- { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
- { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
- { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
- { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
- { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
- { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
+ { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
+ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
+ },
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
unsigned int throttled:1; /* actually throttled */
unsigned int throttle_req:1; /* throttle requested */
u8 bInterval;
- struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
+ struct usb_anchor delayed; /* writes queued for a device about to be woken */
};
#define CDC_DATA_INTERFACE_TYPE 0x0a
if (status == -EAGAIN || status == -EBUSY)
usb_mark_last_busy(udev);
- /* The PM core reacts badly unless the return code is 0,
- * -EAGAIN, or -EBUSY, so always return -EBUSY on an error.
+ /*
+ * The PM core reacts badly unless the return code is 0,
+ * -EAGAIN, or -EBUSY, so always return -EBUSY on an error
+ * (except for root hubs, because they don't suspend through
+ * an upstream port like other USB devices).
*/
- if (status != 0)
+ if (status != 0 && udev->parent)
return -EBUSY;
return status;
}
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
- /* Hubs have proper suspend/resume support. USB 3.0 device suspend is
+ /*
+ * Hubs have proper suspend/resume support, except for root hubs
+ * where the controller driver doesn't have bus_suspend and
+ * bus_resume methods. Also, USB 3.0 device suspend is
* different from USB 2.0/1.1 device suspend, and unfortunately we
* don't support it yet. So leave autosuspend disabled for USB 3.0
* external hubs for now. Enable autosuspend for USB 3.0 roothubs,
* since that isn't a "real" hub.
*/
- if (!hub_is_superspeed(hdev) || !hdev->parent)
- usb_enable_autosuspend(hdev);
+ if (hdev->parent) { /* normal device */
+ if (!hub_is_superspeed(hdev))
+ usb_enable_autosuspend(hdev);
+ } else { /* root hub */
+ const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver;
+
+ if (drv->bus_suspend && drv->bus_resume)
+ usb_enable_autosuspend(hdev);
+ }
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
},
},
+ {
+ /* HASEE E200 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+ DMI_MATCH(DMI_BOARD_NAME, "E210"),
+ DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+ },
+ },
{ }
};
{
int try_handoff = 1, tried_handoff = 0;
- /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
- * the handoff on its unused controller. Skip it. */
- if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
+ /*
+ * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+ * the handoff on its unused controller. Skip it.
+ *
+ * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
+ */
+ if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+ pdev->device == 0x27cc)) {
if (dmi_check_system(ehci_dmi_nohandoff_table))
try_handoff = 0;
}
kfree(cur_cd);
}
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+ struct list_head *ep = &bwt->interval_bw[j].endpoints;
+ while (!list_empty(ep))
+ list_del_init(ep->next);
+ }
+ }
+
for (i = 1; i < MAX_HC_SLOTS; ++i)
xhci_free_virt_device(xhci, i);
if (!xhci->rh_bw)
goto no_bw;
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- for (i = 0; i < num_ports; i++) {
- struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
- for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
- struct list_head *ep = &bwt->interval_bw[j].endpoints;
- while (!list_empty(ep))
- list_del_init(ep->next);
- }
- }
-
for (i = 0; i < num_ports; i++) {
struct xhci_tt_bw_info *tt, *n;
list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
+ struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
- struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
+ u64 hw_dequeue;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
stream_id);
return;
}
- state->new_cycle_state = 0;
- xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
- state->new_deq_seg = find_trb_seg(cur_td->start_seg,
- dev->eps[ep_index].stopped_trb,
- &state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ /* 4.6.9 the css flag is written to the stream context for streams */
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ struct xhci_stream_ctx *ctx =
+ &ep->stream_info->stream_ctx_array[stream_id];
+ hw_dequeue = le64_to_cpu(ctx->stream_ring);
+ } else {
+ struct xhci_ep_ctx *ep_ctx
+ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+ hw_dequeue = le64_to_cpu(ep_ctx->deq);
+ }
+
+ /* Find virtual address and segment of hardware dequeue pointer */
+ state->new_deq_seg = ep_ring->deq_seg;
+ state->new_deq_ptr = ep_ring->dequeue;
+ while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+ != (dma_addr_t)(hw_dequeue & ~0xf)) {
+ next_trb(xhci, ep_ring, &state->new_deq_seg,
+ &state->new_deq_ptr);
+ if (state->new_deq_ptr == ep_ring->dequeue) {
+ WARN_ON(1);
+ return;
+ }
+ }
+ /*
+ * Find the cycle state for last_trb, starting from the old cycle state
+ * of hw_dequeue. If the ring has only one segment, find_trb_seg()
+ * returns immediately and cannot toggle the cycle state when the search
+ * wraps around, so add one more toggle manually in that case.
+ */
+ state->new_cycle_state = hw_dequeue & 0x1;
+ if (ep_ring->first_seg == ep_ring->first_seg->next &&
+ cur_td->last_trb < state->new_deq_ptr)
+ state->new_cycle_state ^= 0x1;
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
- state->new_deq_ptr,
- &state->new_cycle_state);
+ state->new_deq_ptr, &state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
+ /* Increment to find next TRB after last_trb. Cycle if appropriate. */
trb = &state->new_deq_ptr->generic;
if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
- /*
- * If there is only one segment in a ring, find_trb_seg()'s while loop
- * will not run, and it will return before it has a chance to see if it
- * needs to toggle the cycle bit. It can't tell if the stalled transfer
- * ended just before the link TRB on a one-segment ring, or if the TD
- * wrapped around the top of the ring, because it doesn't have the TD in
- * question. Look for the one-segment case where stalled TRB's address
- * is greater than the new dequeue pointer address.
- */
- if (ep_ring->first_seg == ep_ring->first_seg->next &&
- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
- state->new_cycle_state ^= 0x1;
+ /* Don't update the ring cycle state for the producer (us). */
xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
- /* Don't update the ring cycle state for the producer (us). */
xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
state->new_deq_seg);
addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- /* Clear stopped_td and stopped_trb if endpoint is not halted */
- if (!(ep->ep_state & EP_HALTED)) {
+ /* Clear stopped_td if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED))
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
- }
/*
* Drop the lock and complete the URBs in the cancelled TD list.
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = stream_id;
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
return 0;
} else {
if (trb_comp_code == COMP_STALL) {
* USB class driver clear the stall later.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
#else
-static int xhci_try_enable_msi(struct usb_hcd *hcd)
+static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
return 0;
}
-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}
-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}
xhci_ring_cmd_db(xhci);
}
virt_ep->stopped_td = NULL;
- virt_ep->stopped_trb = NULL;
virt_ep->stopped_stream = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
#define EP_GETTING_NO_STREAMS (1 << 5)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
- /* The TRB that was last reported in a stopped endpoint ring */
- union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
unsigned int stopped_stream;
/* Watchdog timer for stop endpoint command to cancel URBs */
urb->context = &completion;
urb->complete = unlink1_callback;
+ if (usb_pipeout(urb->pipe)) {
+ simple_fill_buf(urb);
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
/* keep the endpoint busy. there are lots of hc/hcd-internal
* states, and testing should get to all of them over time.
*
unlink_queued_callback, &ctx);
ctx.urbs[i]->transfer_dma = buf_dma;
ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+
+ if (usb_pipeout(ctx.urbs[i]->pipe)) {
+ simple_fill_buf(ctx.urbs[i]);
+ ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
+ }
}
/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
/*
* ELV devices:
*/
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
/* Cressi Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
+ /* Brainboxes Devices */
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
*/
#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+/*
+ * NovaTech product ids (FTDI_VID)
+ */
+#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+
/********************************/
/** third-party VID/PID combos **/
* Manufacturer: Cressi
*/
#define FTDI_CRESSI_PID 0x87d0
+
+/*
+ * Brainboxes devices
+ */
+#define BRAINBOXES_VID 0x05d1
+#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */
+#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */
+#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
+#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
+#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
+#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
+#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
+#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
+#define BRAINBOXES_US_606_3_PID 0x2003 /* US-606 6 Port RS232 Serial Port 5 and 6 */
+#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */
+#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */
+#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */
+#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */
+#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */
+#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */
+#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */
+#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */
+#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */
+#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */
+#define BRAINBOXES_US_357_PID 0x7001 /* US-357 1xRS232/422/485 */
+#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */
+#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */
+#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */
+#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */
+#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */
+#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */
+#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */
+#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */
+#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */
+#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
+#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
+#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
+#include <linux/swab.h>
#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>
{
int status = 0;
__u8 read_length;
- __be16 be_start_address;
+ u16 be_start_address;
dbg("%s - @ %x for %d", __func__, start_address, length);
dbg("%s - @ %x for %d", __func__,
start_address, read_length);
}
- be_start_address = cpu_to_be16(start_address);
+ /*
+ * NOTE: Must use swab as wIndex is sent in little-endian
+ * byte order regardless of host byte order.
+ */
+ be_start_address = swab16((u16)start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(__u16)address_type,
- (__force __u16)be_start_address,
+ be_start_address,
buffer, read_length);
if (status) {
{
int status = 0;
int write_length;
- __be16 be_start_address;
+ u16 be_start_address;
/* We can only send a maximum of 1 aligned byte page at a time */
usb_serial_debug_data(debug, &serial->serial->dev->dev,
__func__, write_length, buffer);
- /* Write first page */
- be_start_address = cpu_to_be16(start_address);
+ /*
+ * Write first page.
+ *
+ * NOTE: Must use swab as wIndex is sent in little-endian byte order
+ * regardless of host byte order.
+ */
+ be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, (__u16)address_type,
- (__force __u16)be_start_address,
+ be_start_address,
buffer, write_length);
if (status) {
dbg("%s - ERROR %d", __func__, status);
usb_serial_debug_data(debug, &serial->serial->dev->dev,
__func__, write_length, buffer);
- /* Write next page */
- be_start_address = cpu_to_be16(start_address);
+ /*
+ * Write next page.
+ *
+ * NOTE: Must use swab as wIndex is sent in little-endian byte
+ * order regardless of host byte order.
+ */
+ be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(__u16)address_type,
- (__force __u16)be_start_address,
+ be_start_address,
buffer, write_length);
if (status) {
dev_err(&serial->serial->dev->dev, "%s - ERROR %d\n",
if (rom_desc->Type == desc_type)
return start_address;
- start_address = start_address + sizeof(struct ti_i2c_desc)
- + rom_desc->Size;
+ start_address = start_address + sizeof(struct ti_i2c_desc) +
+ le16_to_cpu(rom_desc->Size);
} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
__u16 i;
__u8 cs = 0;
- for (i = 0; i < rom_desc->Size; i++)
+ for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
cs = (__u8)(cs + buffer[i]);
if (cs != rom_desc->CheckSum) {
break;
if ((start_address + sizeof(struct ti_i2c_desc) +
- rom_desc->Size) > TI_MAX_I2C_SIZE) {
+ le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dbg("%s - structure too big, erroring out.", __func__);
break;
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
- rom_desc->Size, buffer);
+ le16_to_cpu(rom_desc->Size),
+ buffer);
if (status)
break;
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
- rom_desc->Size;
+ le16_to_cpu(rom_desc->Size);
} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));
/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
- rom_desc->Size, buffer);
+ le16_to_cpu(rom_desc->Size), buffer);
if (status)
goto exit;
firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;
i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
- i2c_header->Size = (__u16)buffer_size;
+ i2c_header->Size = cpu_to_le16(buffer_size);
i2c_header->CheckSum = cs;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
struct ti_i2c_desc {
__u8 Type; // Type of descriptor
- __u16 Size; // Size of data only not including header
+ __le16 Size; // Size of data only not including header
__u8 CheckSum; // Checksum (8 bit sum of data only)
__u8 Data[0]; // Data starts here
} __attribute__((packed));
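
The swab16() conversions above look odd next to the __le16 fixes, but they are deliberate: usb_control_msg() itself stores wValue/wIndex little-endian in the setup packet, so a field the firmware expects big-endian on the wire must be byte-swapped in host representation first. cpu_to_be16() only gets this right on little-endian hosts, where it happens to equal swab16(). A worked example:

u16 addr = 0x1234;		/* firmware wants bytes 12 34 on the wire */
u16 windex = swab16(addr);	/* 0x3412 regardless of host endianness */
/* usb_control_msg(..., windex, ...) emits cpu_to_le16(0x3412), i.e.
 * bytes 12 34: big-endian on the wire on both LE and BE hosts.
 * cpu_to_be16(addr) would be a no-op on a big-endian host and the
 * device would see bytes 34 12 instead. */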
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
#define NOVATELWIRELESS_PRODUCT_E362 0x9010
+#define NOVATELWIRELESS_PRODUCT_E371 0x9011
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
#define QUALCOMM_VENDOR_ID 0x05C6
#define CMOTECH_VENDOR_ID 0x16d8
-#define CMOTECH_PRODUCT_6008 0x6008
-#define CMOTECH_PRODUCT_6280 0x6280
+#define CMOTECH_PRODUCT_6001 0x6001
+#define CMOTECH_PRODUCT_CMU_300 0x6002
+#define CMOTECH_PRODUCT_6003 0x6003
+#define CMOTECH_PRODUCT_6004 0x6004
+#define CMOTECH_PRODUCT_6005 0x6005
+#define CMOTECH_PRODUCT_CGU_628A 0x6006
+#define CMOTECH_PRODUCT_CHE_628S 0x6007
+#define CMOTECH_PRODUCT_CMU_301 0x6008
+#define CMOTECH_PRODUCT_CHU_628 0x6280
+#define CMOTECH_PRODUCT_CHU_628S 0x6281
+#define CMOTECH_PRODUCT_CDU_680 0x6803
+#define CMOTECH_PRODUCT_CDU_685A 0x6804
+#define CMOTECH_PRODUCT_CHU_720S 0x7001
+#define CMOTECH_PRODUCT_7002 0x7002
+#define CMOTECH_PRODUCT_CHU_629K 0x7003
+#define CMOTECH_PRODUCT_7004 0x7004
+#define CMOTECH_PRODUCT_7005 0x7005
+#define CMOTECH_PRODUCT_CGU_629 0x7006
+#define CMOTECH_PRODUCT_CHU_629S 0x700a
+#define CMOTECH_PRODUCT_CHU_720I 0x7211
+#define CMOTECH_PRODUCT_7212 0x7212
+#define CMOTECH_PRODUCT_7213 0x7213
+#define CMOTECH_PRODUCT_7251 0x7251
+#define CMOTECH_PRODUCT_7252 0x7252
+#define CMOTECH_PRODUCT_7253 0x7253
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
#define TELIT_PRODUCT_CC864_DUAL 0x1005
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
+#define TELIT_PRODUCT_UE910_V2 0x1012
#define TELIT_PRODUCT_LE920 0x1200
/* ZTE PRODUCTS */
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define ALCATEL_PRODUCT_L100V 0x011e
+#define ALCATEL_PRODUCT_L800MA 0x0203
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
#define OLIVETTI_PRODUCT_OLICARD200 0xc005
+#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
.reserved = BIT(1) | BIT(2),
};
+static const struct option_blacklist_info net_intf0_blacklist = {
+ .reserved = BIT(0),
+};
+
static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};
/* Novatel Ovation MC551 a.k.a. Verizon USB551L */
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
.driver_info = (kernel_ulong_t)&net_intf6_blacklist
},
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist
+ },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
struct usb_wwan_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
+ int res;
dbg("%s", __func__);
if (is_blacklisted(ifNum, OPTION_BLACKLIST_SENDSETUP,
if (portdata->rts_state)
val |= 0x02;
- return usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
+ res = usb_autopm_get_interface(serial->interface);
+ if (res)
+ return res;
+
+ res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
0x22, 0x21, val, ifNum, NULL, 0, USB_CTRL_SET_TIMEOUT);
+
+ usb_autopm_put_interface(serial->interface);
+
+ return res;
}
MODULE_AUTHOR(DRIVER_AUTHOR);
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
#define SUPERIAL_VENDOR_ID 0x5372
#define SUPERIAL_PRODUCT_ID 0x2303
-/* Hewlett-Packard LD220-HP POS Pole Display */
+/* Hewlett-Packard POS Pole Displays */
#define HP_VENDOR_ID 0x03f0
+#define HP_LD960_PRODUCT_ID 0x0b39
+#define HP_LCM220_PRODUCT_ID 0x3139
+#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
/* Cressi Edy (diving computer) PC interface */
spinlock_t susp_lock;
unsigned int suspended:1;
int in_flight;
+ unsigned int open_ports;
};
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{ USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
- { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
};
struct usb_serial *serial = port->serial;
struct sierra_port_private *portdata;
struct sierra_intf_private *intfdata = port->serial->private;
+ struct urb *urb;
dev_dbg(&port->dev, "%s\n", __func__);
if (serial->dev) {
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected) {
- serial->interface->needs_remote_wakeup = 0;
/* odd error handling due to pm counters */
if (!usb_autopm_get_interface(serial->interface))
sierra_send_setup(port);
mutex_unlock(&serial->disc_mutex);
spin_lock_irq(&intfdata->susp_lock);
portdata->opened = 0;
+ if (--intfdata->open_ports == 0)
+ serial->interface->needs_remote_wakeup = 0;
spin_unlock_irq(&intfdata->susp_lock);
+ for (;;) {
+ urb = usb_get_from_anchor(&portdata->delayed);
+ if (!urb)
+ break;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ usb_autopm_put_interface_async(serial->interface);
+ spin_lock(&portdata->lock);
+ portdata->outstanding_urbs--;
+ spin_unlock(&portdata->lock);
+ }
/* Stop reading urbs */
sierra_stop_rx_urbs(port);
usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
err = sierra_submit_rx_urbs(port, GFP_KERNEL);
- if (err) {
- /* get rid of everything as in close */
- sierra_close(port);
- /* restore balance for autopm */
- if (!serial->disconnected)
- usb_autopm_put_interface(serial->interface);
- return err;
- }
+ if (err)
+ goto err_submit;
+
sierra_send_setup(port);
- serial->interface->needs_remote_wakeup = 1;
spin_lock_irq(&intfdata->susp_lock);
portdata->opened = 1;
+ if (++intfdata->open_ports == 1)
+ serial->interface->needs_remote_wakeup = 1;
spin_unlock_irq(&intfdata->susp_lock);
usb_autopm_put_interface(serial->interface);
return 0;
+
+err_submit:
+ sierra_stop_rx_urbs(port);
+
+ for (i = 0; i < portdata->num_in_urbs; i++) {
+ sierra_release_urb(portdata->in_urbs[i]);
+ portdata->in_urbs[i] = NULL;
+ }
+
+ return err;
}
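
needs_remote_wakeup is a property of the whole USB interface, not of one port, so the sierra hunks above count open ports under susp_lock and flip the flag only on the first open and the last close. The pairing, condensed:

/* open: */
spin_lock_irq(&intfdata->susp_lock);
portdata->opened = 1;
if (++intfdata->open_ports == 1)	/* first port up */
	serial->interface->needs_remote_wakeup = 1;
spin_unlock_irq(&intfdata->susp_lock);

/* close: */
spin_lock_irq(&intfdata->susp_lock);
portdata->opened = 0;
if (--intfdata->open_ports == 0)	/* last port down */
	serial->interface->needs_remote_wakeup = 0;
spin_unlock_irq(&intfdata->susp_lock);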
if (err < 0) {
intfdata->in_flight--;
usb_unanchor_urb(urb);
- usb_scuttle_anchored_urbs(&portdata->delayed);
- break;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ spin_lock(&portdata->lock);
+ portdata->outstanding_urbs--;
+ spin_unlock(&portdata->lock);
+ continue;
}
}
/* must be called with BKL held */
printk(KERN_INFO "USB Serial deregistering driver %s\n",
device->description);
+
mutex_lock(&table_lock);
list_del(&device->driver_list);
- usb_serial_bus_deregister(device);
mutex_unlock(&table_lock);
+
+ usb_serial_bus_deregister(device);
}
EXPORT_SYMBOL_GPL(usb_serial_deregister);
usb_pipeendpoint(this_urb->pipe), i);
err = usb_autopm_get_interface_async(port->serial->interface);
- if (err < 0)
+ if (err < 0) {
+ clear_bit(i, &portdata->out_busy);
break;
+ }
/* send the data */
memcpy(this_urb->transfer_buffer, buf, todo);
}
EXPORT_SYMBOL(usb_wwan_open);
+static void unbusy_queued_urb(struct urb *urb,
+ struct usb_wwan_port_private *portdata)
+{
+ int i;
+
+ for (i = 0; i < N_OUT_URB; i++) {
+ if (urb == portdata->out_urbs[i]) {
+ clear_bit(i, &portdata->out_busy);
+ break;
+ }
+ }
+}
+
void usb_wwan_close(struct usb_serial_port *port)
{
int i;
struct usb_serial *serial = port->serial;
struct usb_wwan_port_private *portdata;
struct usb_wwan_intf_private *intfdata = port->serial->private;
+ struct urb *urb;
dbg("%s", __func__);
portdata = usb_get_serial_port_data(port);
portdata->opened = 0;
spin_unlock_irq(&intfdata->susp_lock);
+ for (;;) {
+ urb = usb_get_from_anchor(&portdata->delayed);
+ if (!urb)
+ break;
+ unbusy_queued_urb(urb, portdata);
+ usb_autopm_put_interface_async(serial->interface);
+ }
+
for (i = 0; i < N_IN_URB; i++)
usb_kill_urb(portdata->in_urbs[i]);
for (i = 0; i < N_OUT_URB; i++)
int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
{
struct usb_wwan_intf_private *intfdata = serial->private;
- int b;
dbg("%s entered", __func__);
+ spin_lock_irq(&intfdata->susp_lock);
if (PMSG_IS_AUTO(message)) {
- spin_lock_irq(&intfdata->susp_lock);
- b = intfdata->in_flight;
- spin_unlock_irq(&intfdata->susp_lock);
-
- if (b)
+ if (intfdata->in_flight) {
+ spin_unlock_irq(&intfdata->susp_lock);
return -EBUSY;
+ }
}
-
- spin_lock_irq(&intfdata->susp_lock);
intfdata->suspended = 1;
spin_unlock_irq(&intfdata->susp_lock);
+
stop_read_write_urbs(serial);
return 0;
}
EXPORT_SYMBOL(usb_wwan_suspend);
-static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
-{
- int i;
-
- for (i = 0; i < N_OUT_URB; i++) {
- if (urb == portdata->out_urbs[i]) {
- clear_bit(i, &portdata->out_busy);
- break;
- }
- }
-}
-
-static void play_delayed(struct usb_serial_port *port)
+static int play_delayed(struct usb_serial_port *port)
{
struct usb_wwan_intf_private *data;
struct usb_wwan_port_private *portdata;
struct urb *urb;
- int err;
+ int err = 0;
portdata = usb_get_serial_port_data(port);
data = port->serial->private;
break;
}
}
+
+ return err;
}
int usb_wwan_resume(struct usb_serial *serial)
struct usb_wwan_intf_private *intfdata = serial->private;
struct usb_wwan_port_private *portdata;
struct urb *urb;
- int err = 0;
+ int err;
+ int err_count = 0;
dbg("%s entered", __func__);
/* get the interrupt URBs resubmitted unconditionally */
if (err < 0) {
err("%s: Error %d for interrupt URB of port%d",
__func__, err, i);
- goto err_out;
+ err_count++;
}
}
+ spin_lock_irq(&intfdata->susp_lock);
for (i = 0; i < serial->num_ports; i++) {
/* walk all ports */
port = serial->port[i];
portdata = usb_get_serial_port_data(port);
/* skip closed ports */
- spin_lock_irq(&intfdata->susp_lock);
- if (!portdata->opened) {
- spin_unlock_irq(&intfdata->susp_lock);
+ if (!portdata->opened)
continue;
- }
+
+ err = play_delayed(port);
+ if (err)
+ err_count++;
for (j = 0; j < N_IN_URB; j++) {
urb = portdata->in_urbs[j];
if (err < 0) {
err("%s: Error %d for bulk URB %d",
__func__, err, i);
- spin_unlock_irq(&intfdata->susp_lock);
- goto err_out;
+ err_count++;
}
}
- play_delayed(port);
- spin_unlock_irq(&intfdata->susp_lock);
}
- spin_lock_irq(&intfdata->susp_lock);
intfdata->suspended = 0;
spin_unlock_irq(&intfdata->susp_lock);
-err_out:
- return err;
+
+ if (err_count)
+ return -EIO;
+
+ return 0;
}
EXPORT_SYMBOL(usb_wwan_resume);
#endif
us->transport_name = "Shuttle USBAT";
us->transport = usbat_flash_transport;
us->transport_reset = usb_stor_CB_reset;
- us->max_lun = 1;
+ us->max_lun = 0;
result = usb_stor_probe2(us);
return result;
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Daniele Forsi <dforsi@gmail.com> */
+UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
+ "Nokia",
+ "5300",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
+/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
+UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
+ "Nokia",
+ "305",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64),
+
/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
"Nokia",
#define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
-#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
+#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
/* code speedup */
#ifdef CONFIG_FB_MATROX_MILLENIUM
if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
return -EINVAL;
+ if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
+ return -EINVAL;
if (var->nonstd)
return -EINVAL;
if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
par->yres = info->var.yres;
par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
par->bits_per_pixel = info->var.bits_per_pixel;
+ info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
tga_type = par->tga_type;
int tga_bus_tc = TGA_BUS_TC(par->dev);
u8 tga_type = par->tga_type;
const char *tga_type_name = NULL;
+ unsigned memory_size;
switch (tga_type) {
case TGA_TYPE_8PLANE:
tga_type_name = "Digital ZLXp-E1";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E1";
+ memory_size = 2097152;
break;
case TGA_TYPE_24PLANE:
if (tga_bus_pci)
tga_type_name = "Digital ZLXp-E2";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E2";
+ memory_size = 8388608;
break;
case TGA_TYPE_24PLUSZ:
if (tga_bus_pci)
tga_type_name = "Digital ZLXp-E3";
if (tga_bus_tc)
tga_type_name = "Digital ZLX-E3";
+ memory_size = 16777216;
break;
default:
tga_type_name = "Unknown";
+ memory_size = 16777216;
break;
}
? FB_VISUAL_PSEUDOCOLOR
: FB_VISUAL_DIRECTCOLOR);
- info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
info->fix.smem_start = (size_t) par->tga_fb_base;
- info->fix.smem_len = info->fix.line_length * par->yres;
+ info->fix.smem_len = memory_size;
info->fix.mmio_start = (size_t) par->tga_regs_base;
info->fix.mmio_len = 512;
modedb_tga = &modedb_tc;
modedbsize_tga = 1;
}
+
+ tgafb_init_fix(info);
+
ret = fb_find_mode(&info->var, info,
mode_option ? mode_option : mode_option_tga,
modedb_tga, modedbsize_tga, NULL,
}
tgafb_set_par(info);
- tgafb_init_fix(info);
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "tgafb: Could not register framebuffer\n");
*/
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
+
+ /*
+ * Updating the TIMER register requires a few microseconds
+ * on the AR934x SoCs at least. Use a small delay to ensure
+ * that the TIMER register is updated within the hardware
+ * before enabling the watchdog.
+ */
+ udelay(2);
+
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
}
* shortening the size of the delalloc range we're searching
*/
free_extent_state(cached_state);
+ cached_state = NULL;
if (!loops) {
unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
max_bytes = PAGE_CACHE_SIZE - offset;
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
if (!btrfs_test_opt(root, INODE_MAP_CACHE))
return;
-
again:
if (root->cached == BTRFS_CACHE_FINISHED) {
- __btrfs_add_free_space(ctl, objectid, 1);
+ __btrfs_add_free_space(pinned, objectid, 1);
} else {
- /*
- * If we are in the process of caching free ino chunks,
- * to avoid adding the same inode number to the free_ino
- * tree twice due to cross transaction, we'll leave it
- * in the pinned tree until a transaction is committed
- * or the caching work is done.
- */
-
mutex_lock(&root->fs_commit_mutex);
spin_lock(&root->cache_lock);
if (root->cached == BTRFS_CACHE_FINISHED) {
start_caching(root);
- if (objectid <= root->cache_progress ||
- objectid > root->highest_objectid)
- __btrfs_add_free_space(ctl, objectid, 1);
- else
- __btrfs_add_free_space(pinned, objectid, 1);
+ __btrfs_add_free_space(pinned, objectid, 1);
mutex_unlock(&root->fs_commit_mutex);
}
size_t count = iov_length(iov, nr_segs);
loff_t final_size = pos + count;
- if (pos >= inode->i_size)
+ if (pos >= i_size_read(inode))
return 0;
if ((pos & blockmask) || (final_size & blockmask))
ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
"logical block %lu\n", inode->i_ino, flags, map->m_len,
(unsigned long) map->m_lblk);
+
+	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
+ if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
+ return -EIO;
+
/*
* Try to see if we can get the block without requesting a new
* file system block.
}
BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
start > ac->ac_o_ex.fe_logical);
- BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
+ BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
/* now prepare goal request */
if (error) {
io_end->flag |= EXT4_IO_END_ERROR;
- ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+ ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
"(offset %llu size %ld starting block %llu)",
- inode->i_ino,
+ error, inode->i_ino,
(unsigned long long) io_end->offset,
(long) io_end->size,
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
+ mapping_set_error(inode->i_mapping, error);
}
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
set_page_writeback(page);
ClearPageError(page);
+ /*
+ * Comments copied from block_write_full_page_endio:
+ *
+ * The page straddles i_size. It must be zeroed out on each and every
+ * writepage invocation because it may be mmapped. "A file is mapped
+ * in multiples of the page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when mapped, and
+ * writes to that region are not written out to the file."
+ */
+ if (len < PAGE_CACHE_SIZE)
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
+
for (bh = head = page_buffers(page), block_start = 0;
bh != head || !block_start;
block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_start >= len) {
- /*
- * Comments copied from block_write_full_page_endio:
- *
- * The page straddles i_size. It must be zeroed out on
- * each and every writepage invocation because it may
- * be mmapped. "A file is mapped in multiples of the
- * page size. For a file that is not a multiple of
- * the page size, the remaining memory is zeroed when
- * mapped, and writes to that region are not written
- * out to the file."
- */
- zero_user_segment(page, block_start, block_end);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
restart:
break_time = flock->fl_break_time;
- if (break_time != 0) {
+ if (break_time != 0)
break_time -= jiffies;
- if (break_time == 0)
- break_time++;
- }
+ if (break_time == 0)
+ break_time++;
locks_insert_block(flock, new_fl);
unlock_flocks();
error = wait_event_interruptible_timeout(new_fl->fl_wait,
* by uid/gid. */
int i, j;
- if (pacl->a_count <= 4)
- return; /* no users or groups */
+ /* no users or groups */
+ if (!pacl || pacl->a_count <= 4)
+ return;
+
i = 1;
while (pacl->a_entries[i].e_tag == ACL_USER)
i++;
/*
* ACLs with no ACEs are treated differently in the inheritable
- * and effective cases: when there are no inheritable ACEs, we
- * set a zero-length default posix acl:
+ * and effective cases: when there are no inheritable ACEs,
+ * calls ->set_acl with a NULL ACL structure.
*/
- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
- pacl = posix_acl_alloc(0, GFP_KERNEL);
- return pacl ? pacl : ERR_PTR(-ENOMEM);
- }
+ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+ return NULL;
+
/*
* When there are no effective ACEs, the following will end
* up setting a 3-element effective posix ACL with all
static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
{
+ int maxtime = max_cb_time();
struct rpc_timeout timeparms = {
- .to_initval = max_cb_time(),
+ .to_initval = maxtime,
.to_retries = 0,
+ .to_maxval = maxtime,
};
struct rpc_create_args args = {
.net = &init_net,
idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}
+static void
+hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+{
+ lockdep_assert_held(&recall_lock);
+
+ list_add(&dp->dl_perfile, &fp->fi_delegations);
+ list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+}
+
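(Both call sites of hash_delegation_locked() later in this series hold recall_lock, which is exactly what the lockdep_assert_held() above documents: the per-file and per-client delegation lists are now manipulated only under that lock.)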
/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
unhash_stid(&dp->dl_stid);
- list_del_init(&dp->dl_perclnt);
spin_lock(&recall_lock);
+ list_del_init(&dp->dl_perclnt);
list_del_init(&dp->dl_perfile);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&recall_lock);
}
memcpy(clp->cl_name.data, name.data, name.len);
clp->cl_name.len = name.len;
+ INIT_LIST_HEAD(&clp->cl_sessions);
+ idr_init(&clp->cl_stateids);
+ atomic_set(&clp->cl_refcount, 0);
+ clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+ INIT_LIST_HEAD(&clp->cl_idhash);
+ INIT_LIST_HEAD(&clp->cl_strhash);
+ INIT_LIST_HEAD(&clp->cl_openowners);
+ INIT_LIST_HEAD(&clp->cl_delegations);
+ INIT_LIST_HEAD(&clp->cl_lru);
+ INIT_LIST_HEAD(&clp->cl_callbacks);
+ spin_lock_init(&clp->cl_lock);
+ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
}
list_del(&ses->se_perclnt);
nfsd4_put_session(ses);
}
+ rpc_destroy_wait_queue(&clp->cl_cb_waitq);
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
if (clp == NULL)
return NULL;
- INIT_LIST_HEAD(&clp->cl_sessions);
princ = svc_gss_principal(rqstp);
if (princ) {
}
}
- idr_init(&clp->cl_stateids);
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
- atomic_set(&clp->cl_refcount, 0);
- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
- INIT_LIST_HEAD(&clp->cl_idhash);
- INIT_LIST_HEAD(&clp->cl_strhash);
- INIT_LIST_HEAD(&clp->cl_openowners);
- INIT_LIST_HEAD(&clp->cl_delegations);
- INIT_LIST_HEAD(&clp->cl_lru);
- INIT_LIST_HEAD(&clp->cl_callbacks);
- spin_lock_init(&clp->cl_lock);
INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
- rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
copy_verf(clp, verf);
rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
clp->cl_flavor = rqstp->rq_flavor;
if (!fl)
return -ENOMEM;
fl->fl_file = find_readable_file(fp);
- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
if (status) {
- list_del_init(&dp->dl_perclnt);
locks_free_lock(fl);
return -ENOMEM;
}
fp->fi_deleg_file = fl->fl_file;
get_file(fp->fi_deleg_file);
atomic_set(&fp->fi_delegees, 1);
- list_add(&dp->dl_perfile, &fp->fi_delegations);
+ spin_lock(&recall_lock);
+ hash_delegation_locked(dp, fp);
+ spin_unlock(&recall_lock);
return 0;
}
return -EAGAIN;
}
atomic_inc(&fp->fi_delegees);
- list_add(&dp->dl_perfile, &fp->fi_delegations);
+ hash_delegation_locked(dp, fp);
spin_unlock(&recall_lock);
- list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
return 0;
}
static __be32
nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
{
- if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
+ struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
+
+ if (check_for_locks(stp->st_file, lo))
return nfserr_locks_held;
- release_lock_stateid(stp);
+ /*
+ * Currently there's a 1-1 lock stateid<->lockowner
+	 * correspondence, and we have to delete the lockowner when we
+ * delete the lock stateid:
+ */
+ release_lockowner(lo);
return nfs_ok;
}
if (!same_owner_str(&lo->lo_owner, owner, clid))
return false;
+ if (list_empty(&lo->lo_owner.so_stateids)) {
+ WARN_ON_ONCE(1);
+ return false;
+ }
lst = list_first_entry(&lo->lo_owner.so_stateids,
struct nfs4_ol_stateid, st_perstateowner);
return lst->st_file->fi_inode == inode;
err = vfs_getattr(exp->ex_path.mnt, dentry, &stat);
if (err)
goto out_nfserr;
- if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
- FATTR4_WORD0_MAXNAME)) ||
+ if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+ FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &statfs);
umode_t mode = 0;
int not_equiv = 0;
+ /*
+ * A null ACL can always be presented as mode bits.
+ */
+ if (!acl)
+ return 0;
+
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch (pa->e_tag) {
case ACL_USER_OBJ:
return err;
}
-static void reiserfs_vfs_truncate_file(struct inode *inode)
+void reiserfs_vfs_truncate_file(struct inode *inode)
{
mutex_lock(&(REISERFS_I(inode)->tailpack));
reiserfs_truncate_file(inode, 1);
};
const struct inode_operations reiserfs_file_inode_operations = {
- .truncate = reiserfs_vfs_truncate_file,
.setattr = reiserfs_setattr,
.setxattr = reiserfs_setxattr,
.getxattr = reiserfs_getxattr,
loff_t isize = i_size_read(inode);
loff_t end = offset + iov_length(iov, nr_segs);
- if (end > isize)
- vmtruncate(inode, isize);
+ if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
+ truncate_setsize(inode, isize);
+ reiserfs_vfs_truncate_file(inode);
+ }
}
return ret;
*/
reiserfs_write_unlock_once(inode->i_sb, depth);
if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode))
- error = vmtruncate(inode, attr->ia_size);
+ attr->ia_size != i_size_read(inode)) {
+ error = inode_newsize_ok(inode, attr->ia_size);
+ if (!error) {
+ /*
+ * Could race against reiserfs_file_release
+ * if called from NFS, so take tailpack mutex.
+ */
+ mutex_lock(&REISERFS_I(inode)->tailpack);
+ truncate_setsize(inode, attr->ia_size);
+ reiserfs_truncate_file(inode, 1);
+ mutex_unlock(&REISERFS_I(inode)->tailpack);
+ }
+ }
if (!error) {
setattr_copy(inode, attr);
ubifs_release_dirty_inode_budget(c, ui);
}
- unlock_page(page);
- return 0;
+ return VM_FAULT_LOCKED;
out_unlock:
unlock_page(page);
freed = ubifs_destroy_tnc_subtree(znode);
atomic_long_sub(freed, &ubifs_clean_zn_cnt);
atomic_long_sub(freed, &c->clean_zn_cnt);
- ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0);
total_freed += freed;
znode = zprev;
}
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
+ * @threads_handled: stats field for deferred spurious detection of threaded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
+ atomic_t threads_handled;
+ int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
unsigned long addr;
struct kvm_arch_async_pf arch;
struct page *page;
- bool done;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
unsigned long qc_allocated;
unsigned int qc_active;
int nr_active_links; /* #links with active qcs */
+ unsigned int last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
* LZO Public Kernel Interface
* A mini subset of the LZO real-time data compression library
*
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
- * Changed for kernel use by:
+ * Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
-#define LZO1X_MEM_COMPRESS (16384 * sizeof(unsigned char *))
-#define LZO1X_1_MEM_COMPRESS LZO1X_MEM_COMPRESS
+#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short))
+#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
-/* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */
+/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len);
+ unsigned char *dst, size_t *dst_len);
/*
* Return values (< 0 = Error)
#define LZO_E_EOF_NOT_FOUND (-7)
#define LZO_E_INPUT_NOT_CONSUMED (-8)
#define LZO_E_NOT_YET_IMPLEMENTED (-9)
+#define LZO_E_INVALID_ARGUMENT (-10)
#endif
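A minimal caller sketch for the interface above; the buffer names, the src/src_len inputs, and the kmalloc-based allocation are illustrative assumptions, not part of this header:

	/* wrkmem must be LZO1X_1_MEM_COMPRESS bytes; dst must be sized
	 * for incompressible input via lzo1x_worst_compress().
	 */
	size_t dst_len = lzo1x_worst_compress(src_len);
	void *wrkmem = kmalloc(LZO1X_1_MEM_COMPRESS, GFP_KERNEL);
	unsigned char *dst = kmalloc(dst_len, GFP_KERNEL);
	int err = -ENOMEM;

	if (wrkmem && dst)
		err = lzo1x_1_compress(src, src_len, dst, &dst_len, wrkmem);
	if (err == LZO_E_OK)
		pr_info("compressed %zu -> %zu bytes\n", src_len, dst_len);
	kfree(dst);
	kfree(wrkmem);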
};
struct dmi_strmatch {
- unsigned char slot;
+ unsigned char slot:7;
+ unsigned char exact_match:1;
char substr[79];
};
#define dmi_device_id dmi_system_id
#endif
-#define DMI_MATCH(a, b) { a, b }
+#define DMI_MATCH(a, b) { .slot = a, .substr = b }
+#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
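A hedged example of the new flag in use; the quirk table, ident string, and match values below are hypothetical:

	static const struct dmi_system_id example_quirks[] = {
		{
			.ident = "Example Board",
			.matches = {
				/* full-string match required */
				DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Inc."),
				/* substring match, as before */
				DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
			},
		},
		{ }
	};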
#define PLATFORM_NAME_SIZE 20
#define PLATFORM_MODULE_PREFIX "platform:"
#include <linux/compiler.h> /* For unlikely. */
#include <linux/sched.h> /* For struct task_struct. */
+#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
extern long arch_ptrace(struct task_struct *child, long request,
}
}
+/**
+ * ptrace_event_pid - possibly stop for a ptrace event notification
+ * @event: %PTRACE_EVENT_* value to report
+ * @pid: process identifier for %PTRACE_GETEVENTMSG to return
+ *
+ * Check whether @event is enabled and, if so, report @event and @pid
+ * to the ptrace parent. @pid is reported as the pid_t seen from the
+ * ptrace parent's pid namespace.
+ *
+ * Called without locks.
+ */
+static inline void ptrace_event_pid(int event, struct pid *pid)
+{
+ /*
+ * FIXME: There's a potential race if a ptracer in a different pid
+ * namespace than parent attaches between computing message below and
+ * when we acquire tasklist_lock in ptrace_stop(). If this happens,
+ * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
+ */
+ unsigned long message = 0;
+ struct pid_namespace *ns;
+
+ rcu_read_lock();
+ ns = task_active_pid_ns(rcu_dereference(current->parent));
+ if (ns)
+ message = pid_nr_ns(pid, ns);
+ rcu_read_unlock();
+
+ ptrace_event(event, message);
+}
+
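(The do_fork() hunk later in this series is the intended caller: its ptrace_event(trace, nr) and ptrace_event(PTRACE_EVENT_VFORK_DONE, nr) calls become ptrace_event_pid() calls on a struct pid taken with get_task_pid().)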
/**
* ptrace_init_task - initialize ptrace state for a new child
* @child: new child task
* calling arch_ptrace_stop() when it would be superfluous. For example,
* if the thread has not been back to user mode since the last stop, the
* thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
*/
#define arch_ptrace_stop_needed(code, info) (0)
#endif
*,
int count);
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
+void reiserfs_vfs_truncate_file(struct inode *inode);
int reiserfs_commit_page(struct inode *inode, struct page *page,
unsigned from, unsigned to);
int reiserfs_flush_old_commits(struct super_block *);
{
return skb->head + skb->end;
}
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+ return skb->end;
+}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->end;
}
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+ return skb->end - skb->head;
+}
#endif
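(Both variants preserve the invariant skb_end_pointer(skb) == skb->head + skb_end_offset(skb), so the size check in the following hunk can compare against the head area's extent without knowing whether skb->end stores an offset or a pointer.)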
/* Internal */
skb->sk = NULL;
}
+/**
+ * skb_orphan_frags - orphan the frags contained in a buffer
+ * @skb: buffer to orphan frags from
+ * @gfp_mask: allocation mask for replacement pages
+ *
+ * For each frag in the SKB which needs a destructor (i.e. has an
+ * owner) create a copy of that frag and release the original
+ * page by calling the destructor.
+ */
+static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
+ return 0;
+ return skb_copy_ubufs(skb, gfp_mask);
+}
+
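A typical caller pattern for skb_orphan_frags() might look like this (sketch only; the drop label is illustrative):

	/* Give up references to userspace zerocopy pages before the
	 * skb is queued somewhere that may hold on to it:
	 */
	if (skb_orphan_frags(skb, GFP_ATOMIC))
		goto drop;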
/**
* __skb_queue_purge - empty a list
* @list: list to empty
return false;
skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
- if (skb_end_pointer(skb) - skb->head < skb_size)
+ if (skb_end_offset(skb) < skb_size)
return false;
if (skb_shared(skb) || skb_cloned(skb))
/* can be called with or without local BH being disabled */
static inline int inet_getid(struct inet_peer *p, int more)
{
- int old, new;
more++;
inet_peer_refcheck(p);
- do {
- old = atomic_read(&p->ip_id_count);
- new = old + more;
- if (!new)
- new = 1;
- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
- return new;
+ return atomic_add_return(more, &p->ip_id_count) - more;
}
#endif /* _NET_INETPEER_H */
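Worked example for the rewrite above: with ip_id_count at 5 and more == 2 after the increment, atomic_add_return() advances the counter to 7 and returns 7, so this caller gets id 5; a concurrent caller then starts at 7. Each caller receives a disjoint id range with no cmpxchg retry loop, at the cost of dropping the old never-return-zero adjustment.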
#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
+ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
+ */
+#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
+
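(Numerically, IP6_MAX_MTU is 0xFFFF + 40 = 65575 bytes, struct ipv6hdr being fixed at 40 bytes.)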
/*
* rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
* between IPV6_ADDR_PREFERENCES socket option values
struct list_head siblings;
struct list_head devices;
struct device dev;
- unsigned int reap_ref; /* protected by the host lock */
+ struct kref reap_ref; /* last put renders target invisible */
unsigned int channel;
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
#define SCSI_DEFAULT_TARGET_BLOCKED 3
char scsi_level;
- struct execute_work ew;
enum scsi_target_state state;
void *hostdata; /* available to low-level driver */
unsigned long starget_data[0]; /* for the transport */
int user_ctl_count; /* count of all user controls */
struct list_head controls; /* all controls for this card */
struct list_head ctl_files; /* active control files */
+ struct mutex user_ctl_lock; /* protects user controls against
+ concurrent access */
struct snd_info_entry *proc_root; /* root for soundcard specific files */
struct snd_info_entry *proc_id; /* the card id */
TP_fast_assign(
__entry->ip = ip;
- __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
+ __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
__assign_str(name, mod->name);
),
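(The subtraction matches how the module core itself derives the reference count, increments minus decrements; summing the two counters double-counted every get/put pair.)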
#include <linux/tracepoint.h>
#include <linux/unistd.h>
#include <linux/ftrace_event.h>
+#include <linux/thread_info.h>
#include <asm/ptrace.h>
void perf_sysexit_disable(struct ftrace_event_call *call);
#endif
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+ else
+ clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+}
+#else
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+}
+#endif
+
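(copy_process() picks this helper up once the child's thread flags are final; see the fork hunk later in this series, where syscall_tracepoint_update(p) is called just before tasklist_lock is released.)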
#endif /* _TRACE_SYSCALL_H */
return AUDIT_BUILD_CONTEXT;
}
+static int audit_in_mask(const struct audit_krule *rule, unsigned long val)
+{
+ int word, bit;
+
+ if (val > 0xffffffff)
+ return false;
+
+ word = AUDIT_WORD(val);
+ if (word >= AUDIT_BITMASK_SIZE)
+ return false;
+
+ bit = AUDIT_BIT(val);
+
+ return rule->mask[word] & bit;
+}
+
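Worked example, assuming the usual AUDIT_WORD(nr) == nr / 32 and AUDIT_BIT(nr) == 1 << (nr % 32) definitions: audit_in_mask(rule, 42) tests rule->mask[1] & (1 << 10), while any val above 0xffffffff or whose word index reaches AUDIT_BITMASK_SIZE is rejected up front instead of indexing past the mask array.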
/* At syscall entry and exit time, this filter is called if the
* audit_state is not low enough that auditing cannot take place, but is
* also not high enough that we already know we have to write an audit
rcu_read_lock();
if (!list_empty(list)) {
- int word = AUDIT_WORD(ctx->major);
- int bit = AUDIT_BIT(ctx->major);
-
list_for_each_entry_rcu(e, list, list) {
- if ((e->rule.mask[word] & bit) == bit &&
+ if (audit_in_mask(&e->rule, ctx->major) &&
audit_filter_rules(tsk, &e->rule, ctx, NULL,
&state, false)) {
rcu_read_unlock();
rcu_read_lock();
for (i = 0; i < ctx->name_count; i++) {
- int word = AUDIT_WORD(ctx->major);
- int bit = AUDIT_BIT(ctx->major);
struct audit_names *n = &ctx->names[i];
int h = audit_hash_ino((u32)n->ino);
struct list_head *list = &audit_inode_hash[h];
continue;
list_for_each_entry_rcu(e, list, list) {
- if ((e->rule.mask[word] & bit) == bit &&
+ if (audit_in_mask(&e->rule, ctx->major) &&
audit_filter_rules(tsk, &e->rule, ctx, n,
&state, false)) {
rcu_read_unlock();
cpuctx->exclusive = 0;
}
+struct remove_event {
+ struct perf_event *event;
+ bool detach_group;
+};
+
/*
* Cross CPU call to remove a performance event
*
*/
static int __perf_remove_from_context(void *info)
{
- struct perf_event *event = info;
+ struct remove_event *re = info;
+ struct perf_event *event = re->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
+ if (re->detach_group)
+ perf_group_detach(event);
list_del_event(event, ctx);
if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
ctx->is_active = 0;
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
+ struct remove_event re = {
+ .event = event,
+ .detach_group = detach_group,
+ };
lockdep_assert_held(&ctx->mutex);
* Per cpu events are removed via an smp call and
* the removal is always successful.
*/
- cpu_function_call(event->cpu, __perf_remove_from_context, event);
+ cpu_function_call(event->cpu, __perf_remove_from_context, &re);
return;
}
retry:
- if (!task_function_call(task, __perf_remove_from_context, event))
+ if (!task_function_call(task, __perf_remove_from_context, &re))
return;
raw_spin_lock_irq(&ctx->lock);
* Since the task isn't running, its safe to remove the event, us
* holding the ctx->lock ensures the task won't get scheduled in.
*/
+ if (detach_group)
+ perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
- raw_spin_lock_irq(&ctx->lock);
- perf_group_detach(event);
- raw_spin_unlock_irq(&ctx->lock);
- perf_remove_from_context(event);
+ perf_remove_from_context(event, true);
mutex_unlock(&ctx->mutex);
free_event(event);
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
+
+ /* Keeps track of cpu being initialized/exited */
+ bool online;
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
- if (WARN_ON_ONCE(!head))
+ if (!head) {
+ /*
+ * We can race with cpu hotplug code. Do not
+ * WARN if the cpu just got unplugged.
+ */
+ WARN_ON_ONCE(swhash->online);
return -EINVAL;
+ }
hlist_add_head_rcu(&event->hlist_entry, head);
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
+ } else {
+ if (attr.sample_period & (1ULL << 63))
+ return -EINVAL;
}
/*
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
- perf_remove_from_context(group_leader);
+ perf_remove_from_context(group_leader, false);
/*
* Removing from the context ends up with disabled
perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
- perf_remove_from_context(sibling);
+ perf_remove_from_context(sibling, false);
perf_event__state_init(sibling);
put_ctx(gctx);
}
struct perf_event_context *child_ctx,
struct task_struct *child)
{
- if (child_event->parent) {
- raw_spin_lock_irq(&child_ctx->lock);
- perf_group_detach(child_event);
- raw_spin_unlock_irq(&child_ctx->lock);
- }
-
- perf_remove_from_context(child_event);
+ perf_remove_from_context(child_event, !!child_event->parent);
/*
* It can happen that the parent exits first, and has events
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
+ swhash->online = true;
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
static void __perf_event_exit_context(void *__info)
{
+ struct remove_event re = { .detach_group = false };
struct perf_event_context *ctx = __info;
- struct perf_event *event;
perf_pmu_rotate_stop(ctx->pmu);
rcu_read_lock();
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
- __perf_remove_from_context(event);
+ list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+ __perf_remove_from_context(&re);
rcu_read_unlock();
}
perf_event_exit_cpu_context(cpu);
mutex_lock(&swhash->hlist_mutex);
+ swhash->online = false;
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
}
total_forks++;
	spin_unlock(&current->sighand->siglock);
+ syscall_tracepoint_update(p);
write_unlock_irq(&tasklist_lock);
+
proc_fork_connector(p);
cgroup_post_fork(p);
if (clone_flags & CLONE_THREAD)
*/
if (!IS_ERR(p)) {
struct completion vfork;
+ struct pid *pid;
trace_sched_process_fork(current, p);
- nr = task_pid_vnr(p);
+ pid = get_task_pid(p, PIDTYPE_PID);
+ nr = pid_vnr(pid);
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
/* forking complete and child started to run, tell ptracer */
if (unlikely(trace))
- ptrace_event(trace, nr);
+ ptrace_event_pid(trace, pid);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
- ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+ ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
}
+
+ put_pid(pid);
} else {
nr = PTR_ERR(p);
}
raw_spin_unlock_irq(&curr->pi_lock);
}
+/*
+ * We need to check the following states:
+ *
+ * Waiter | pi_state | pi->owner | uTID | uODIED | ?
+ *
+ * [1] NULL | --- | --- | 0 | 0/1 | Valid
+ * [2] NULL | --- | --- | >0 | 0/1 | Valid
+ *
+ * [3] Found | NULL | -- | Any | 0/1 | Invalid
+ *
+ * [4] Found | Found | NULL | 0 | 1 | Valid
+ * [5] Found | Found | NULL | >0 | 1 | Invalid
+ *
+ * [6] Found | Found | task | 0 | 1 | Valid
+ *
+ * [7] Found | Found | NULL | Any | 0 | Invalid
+ *
+ * [8] Found | Found | task | ==taskTID | 0/1 | Valid
+ * [9] Found | Found | task | 0 | 0 | Invalid
+ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
+ *
+ * [1] Indicates that the kernel can acquire the futex atomically. We
+ *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
+ *
+ * [2] Valid, if TID does not belong to a kernel thread. If no matching
+ * thread is found then it indicates that the owner TID has died.
+ *
+ * [3] Invalid. The waiter is queued on a non PI futex
+ *
+ * [4] Valid state after exit_robust_list(), which sets the user space
+ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
+ *
+ * [5] The user space value got manipulated between exit_robust_list()
+ * and exit_pi_state_list()
+ *
+ * [6] Valid state after exit_pi_state_list() which sets the new owner in
+ * the pi_state but cannot access the user space value.
+ *
+ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
+ *
+ * [8] Owner and user space value match
+ *
+ * [9] There is no transient state which sets the user space TID to 0
+ * except exit_robust_list(), but this is indicated by the
+ * FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+ * TID out of sync.
+ */
static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex(&this->key, key)) {
/*
- * Another waiter already exists - bump up
- * the refcount and return its pi_state:
+ * Sanity check the waiter before increasing
+ * the refcount and attaching to it.
*/
pi_state = this->pi_state;
/*
- * Userspace might have messed up non-PI and PI futexes
+ * Userspace might have messed up non-PI and
+ * PI futexes [3]
*/
if (unlikely(!pi_state))
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
/*
- * When pi_state->owner is NULL then the owner died
- * and another waiter is on the fly. pi_state->owner
- * is fixed up by the task which acquires
- * pi_state->rt_mutex.
- *
- * We do not check for pid == 0 which can happen when
- * the owner died and robust_list_exit() cleared the
- * TID.
+ * Handle the owner died case:
*/
- if (pid && pi_state->owner) {
+ if (uval & FUTEX_OWNER_DIED) {
+ /*
+ * exit_pi_state_list sets owner to NULL and
+ * wakes the topmost waiter. The task which
+ * acquires the pi_state->rt_mutex will fixup
+ * owner.
+ */
+ if (!pi_state->owner) {
+ /*
+ * No pi state owner, but the user
+ * space TID is not 0. Inconsistent
+ * state. [5]
+ */
+ if (pid)
+ return -EINVAL;
+ /*
+ * Take a ref on the state and
+ * return. [4]
+ */
+ goto out_state;
+ }
+
/*
- * Bail out if user space manipulated the
- * futex value.
+ * If TID is 0, then either the dying owner
+ * has not yet executed exit_pi_state_list()
+ * or some waiter acquired the rtmutex in the
+ * pi state, but did not yet fixup the TID in
+ * user space.
+ *
+ * Take a ref on the state and return. [6]
*/
- if (pid != task_pid_vnr(pi_state->owner))
+ if (!pid)
+ goto out_state;
+ } else {
+ /*
+ * If the owner died bit is not set,
+ * then the pi_state must have an
+ * owner. [7]
+ */
+ if (!pi_state->owner)
return -EINVAL;
}
+ /*
+ * Bail out if user space manipulated the
+ * futex value. If pi state exists then the
+ * owner TID must be the same as the user
+ * space TID. [9/10]
+ */
+ if (pid != task_pid_vnr(pi_state->owner))
+ return -EINVAL;
+
+ out_state:
atomic_inc(&pi_state->refcount);
*ps = pi_state;
-
return 0;
}
}
/*
* We are the first waiter - try to look up the real owner and attach
- * the new pi_state to it, but bail out when TID = 0
+ * the new pi_state to it, but bail out when TID = 0 [1]
*/
if (!pid)
return -ESRCH;
if (!p)
return -ESRCH;
+ if (!p->mm) {
+ put_task_struct(p);
+ return -EPERM;
+ }
+
/*
* We need to look at the task state flags to figure out,
* whether the task is exiting. To protect against the do_exit
return ret;
}
+ /*
+ * No existing pi state. First waiter. [2]
+ */
pi_state = alloc_pi_state();
/*
return -EDEADLK;
/*
- * Surprise - we got the lock. Just return to userspace:
+ * Surprise - we got the lock, but we do not trust user space at all.
*/
- if (unlikely(!curval))
- return 1;
+ if (unlikely(!curval)) {
+ /*
+ * We verify whether there is kernel state for this
+		 * futex. If not, we can safely assume that the 0 ->
+ * TID transition is correct. If state exists, we do
+ * not bother to fixup the user space state as it was
+ * corrupted already.
+ */
+ return futex_top_waiter(hb, key) ? -EINVAL : 1;
+ }
uval = curval;
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
+ int ret = 0;
if (!pi_state)
return -EINVAL;
new_owner = this->task;
/*
- * We pass it to the next owner. (The WAITERS bit is always
- * kept enabled while there is PI state around. We must also
- * preserve the owner died bit.)
+ * We pass it to the next owner. The WAITERS bit is always
+	 * kept enabled while there is PI state around. We clean up the
+ * owner died bit, because we are the owner.
*/
- if (!(uval & FUTEX_OWNER_DIED)) {
- int ret = 0;
+ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
-
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
- ret = -EFAULT;
- else if (curval != uval)
- ret = -EINVAL;
- if (ret) {
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
- return ret;
- }
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+ ret = -EFAULT;
+ else if (curval != uval)
+ ret = -EINVAL;
+ if (ret) {
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ return ret;
}
raw_spin_lock_irq(&pi_state->owner->pi_lock);
*
* Returns:
 * 0 - failed to acquire the lock atomically
- * 1 - acquired the lock
+ * >0 - acquired the lock, return value is vpid of the top_waiter
* <0 - error
*/
static int futex_proxy_trylock_atomic(u32 __user *pifutex,
{
struct futex_q *top_waiter = NULL;
u32 curval;
- int ret;
+ int ret, vpid;
if (get_futex_value_locked(&curval, pifutex))
return -EFAULT;
* the contended case or if set_waiters is 1. The pi_state is returned
* in ps in contended cases.
*/
+ vpid = task_pid_vnr(top_waiter->task);
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
set_waiters);
- if (ret == 1)
+ if (ret == 1) {
requeue_pi_wake_futex(top_waiter, key2, hb2);
-
+ return vpid;
+ }
return ret;
}
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head1;
struct futex_q *this, *next;
- u32 curval2;
if (requeue_pi) {
+ /*
+ * Requeue PI only works on two distinct uaddrs. This
+ * check is only valid for private futexes. See below.
+ */
+ if (uaddr1 == uaddr2)
+ return -EINVAL;
+
/*
* requeue_pi requires a pi_state, try to allocate it now
* without any locks in case it fails.
if (unlikely(ret != 0))
goto out_put_key1;
+ /*
+ * The check above which compares uaddrs is not sufficient for
+ * shared futexes. We need to compare the keys:
+ */
+ if (requeue_pi && match_futex(&key1, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);
* At this point the top_waiter has either taken uaddr2 or is
* waiting on it. If the former, then the pi_state will not
* exist yet, look it up one more time to ensure we have a
- * reference to it.
+ * reference to it. If the lock was taken, ret contains the
+ * vpid of the top waiter task.
*/
- if (ret == 1) {
+ if (ret > 0) {
WARN_ON(pi_state);
drop_count++;
task_count++;
- ret = get_futex_value_locked(&curval2, uaddr2);
- if (!ret)
- ret = lookup_pi_state(curval2, hb2, &key2,
- &pi_state);
+ /*
+ * If we acquired the lock, then the user
+ * space value of uaddr2 should be vpid. It
+ * cannot be changed by the top waiter as it
+ * is blocked on hb2 lock if it tries to do
+ * so. If something fiddled with it behind our
+ * back the pi state lookup might unearth
+			 * it. So we'd rather use the known value than
+			 * reread it and hand potential crap to
+			 * lookup_pi_state.
+ */
+ ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
}
switch (ret) {
/*
* To avoid races, try to do the TID -> 0 atomic transition
* again. If it succeeds then we can return without waking
- * anyone else up:
+ * anyone else up. We only try this if neither the waiters nor
+	 * the owner died bit is set.
*/
- if (!(uval & FUTEX_OWNER_DIED) &&
+ if (!(uval & ~FUTEX_TID_MASK) &&
cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
goto pi_faulted;
/*
/*
* No waiters - kernel unlocks the futex:
*/
- if (!(uval & FUTEX_OWNER_DIED)) {
- ret = unlock_futex_pi(uaddr, uval);
- if (ret == -EFAULT)
- goto pi_faulted;
- }
+ ret = unlock_futex_pi(uaddr, uval);
+ if (ret == -EFAULT)
+ goto pi_faulted;
out_unlock:
spin_unlock(&hb->lock);
if (ret)
goto out_key2;
+ /*
+ * The check above which compares uaddrs is not sufficient for
+ * shared futexes. We need to compare the keys:
+ */
+ if (match_futex(&q.key, &key2)) {
+ ret = -EINVAL;
+ goto out_put_keys;
+ }
+
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
goto again;
}
timer->base = new_base;
+ } else {
+ if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+ cpu = this_cpu;
+ goto again;
+ }
}
return new_base;
}
cpu_base->expires_next.tv64 = expires_next.tv64;
+ /*
+ * If a hang was detected in the last timer interrupt then we
+ * leave the hang delay active in the hardware. We want the
+ * system to make progress. That also prevents the following
+ * scenario:
+ * T1 expires 50ms from now
+ * T2 expires 5s from now
+ *
+ * T1 is removed, so this code is called and would reprogram
+ * the hardware to 5s from now. Any hrtimer_start after that
+ * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectively block all timers until the T2 event
+ * fires.
+ */
+ if (cpu_base->hang_detected)
+ return;
+
if (cpu_base->expires_next.tv64 != KTIME_MAX)
tick_program_event(cpu_base->expires_next, 1);
}
/* Remove an active timer from the queue: */
ret = remove_hrtimer(timer, base);
- /* Switch the timer base, if necessary: */
- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
if (mode & HRTIMER_MODE_REL) {
- tim = ktime_add_safe(tim, new_base->get_time());
+ tim = ktime_add_safe(tim, base->get_time());
/*
* CONFIG_TIME_LOW_RES is a temporary way for architectures
* to signal that they simply return xtime in
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+ /* Switch the timer base, if necessary: */
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
timer_stats_hrtimer_set_start_info(timer);
leftmost = enqueue_hrtimer(timer, new_base);
raw_spin_unlock_irq(&desc->lock);
action_ret = handler_fn(desc, action);
- if (!noirqdebug)
- note_interrupt(action->irq, desc, action_ret);
+ if (action_ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
}
wake = atomic_dec_and_test(&desc->threads_active);
return action && (action->flags & IRQF_IRQPOLL);
}
+#define SPURIOUS_DEFERRED 0x80000000
+
void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
if (desc->istate & IRQS_POLL_INPROGRESS)
return;
- /* we get here again via the threaded handler */
- if (action_ret == IRQ_WAKE_THREAD)
- return;
-
if (bad_action_ret(action_ret)) {
report_bad_irq(irq, desc, action_ret);
return;
}
+ /*
+ * We cannot call note_interrupt from the threaded handler
+ * because we need to look at the compound of all handlers
+ * (primary and threaded). Aside of that in the threaded
+ * shared case we have no serialization against an incoming
+ * hardware interrupt while we are dealing with a threaded
+ * result.
+ *
+ * So in case a thread is woken, we just note the fact and
+ * defer the analysis to the next hardware interrupt.
+ *
+	 * The threaded handlers store whether they successfully
+ * handled an interrupt and we check whether that number
+ * changed versus the last invocation.
+ *
+	 * We could handle all interrupts with the delayed-by-one
+	 * mechanism, but for the non-forced threaded case we'd just
+ * add pointless overhead to the straight hardirq interrupts
+ * for the sake of a few lines less code.
+ */
+ if (action_ret & IRQ_WAKE_THREAD) {
+ /*
+ * There is a thread woken. Check whether one of the
+ * shared primary handlers returned IRQ_HANDLED. If
+ * not we defer the spurious detection to the next
+ * interrupt.
+ */
+ if (action_ret == IRQ_WAKE_THREAD) {
+ int handled;
+ /*
+			 * We use bit 31 of threads_handled_last to
+			 * denote that deferred spurious detection is
+			 * active. No locking necessary as
+			 * threads_handled_last is only accessed here
+ * and we have the guarantee that hard
+ * interrupts are not reentrant.
+ */
+ if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
+ desc->threads_handled_last |= SPURIOUS_DEFERRED;
+ return;
+ }
+ /*
+ * Check whether one of the threaded handlers
+ * returned IRQ_HANDLED since the last
+ * interrupt happened.
+ *
+ * For simplicity we just set bit 31, as it is
+ * set in threads_handled_last as well. So we
+ * avoid extra masking. And we really do not
+ * care about the high bits of the handled
+ * count. We just care about the count being
+ * different than the one we saw before.
+ */
+ handled = atomic_read(&desc->threads_handled);
+ handled |= SPURIOUS_DEFERRED;
+ if (handled != desc->threads_handled_last) {
+ action_ret = IRQ_HANDLED;
+ /*
+ * Note: We keep the SPURIOUS_DEFERRED
+ * bit set. We are handling the
+ * previous invocation right now.
+ * Keep it for the current one, so the
+ * next hardware interrupt will
+ * account for it.
+ */
+ desc->threads_handled_last = handled;
+ } else {
+ /*
+ * None of the threaded handlers felt
+ * responsible for the last interrupt
+ *
+ * We keep the SPURIOUS_DEFERRED bit
+ * set in threads_handled_last as we
+ * need to account for the current
+ * interrupt as well.
+ */
+ action_ret = IRQ_NONE;
+ }
+ } else {
+ /*
+ * One of the primary handlers returned
+ * IRQ_HANDLED. So we don't care about the
+ * threaded handlers on the same line. Clear
+ * the deferred detection bit.
+ *
+ * In theory we could/should check whether the
+ * deferred bit is set and take the result of
+ * the previous run into account here as
+ * well. But it's really not worth the
+ * trouble. If every other interrupt is
+ * handled we never trigger the spurious
+ * detector. And if this is just the one out
+ * of 100k unhandled ones which is handled
+			 * then we merrily delay the spurious detection
+ * by one hard interrupt. Not a real problem.
+ */
+ desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
+ }
+ }
+
if (unlikely(action_ret == IRQ_NONE)) {
/*
* If we are seeing only the odd spurious IRQ caused by
/* This has to be done once we're sure module name is unique. */
dynamic_debug_setup(info.debug, info.num_debug);
+ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+ ftrace_module_init(mod);
+
/* Find duplicate symbols */
err = verify_export_symbols(mod);
if (err < 0)
{
return (waiter != NULL);
}
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+ debug_rt_mutex_print_deadlock(w);
+}
owner = *p;
} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+ __releases(lock->wait_lock)
+{
+ struct task_struct *owner = rt_mutex_owner(lock);
+
+ clear_rt_mutex_waiters(lock);
+ raw_spin_unlock(&lock->wait_lock);
+ /*
+ * If a new waiter comes in between the unlock and the cmpxchg
+ * we have two situations:
+ *
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * cmpxchg(p, owner, 0) == owner
+ * mark_rt_mutex_waiters(lock);
+ * acquire(lock);
+ * or:
+ *
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * mark_rt_mutex_waiters(lock);
+ *
+ * cmpxchg(p, owner, 0) != owner
+ * enqueue_waiter();
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * wake waiter();
+ * unlock(wait_lock);
+ * lock(wait_lock);
+ * acquire(lock);
+ */
+ return rt_mutex_cmpxchg(lock, owner, NULL);
+}
+
#else
# define rt_mutex_cmpxchg(l,c,n) (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
lock->owner = (struct task_struct *)
((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+ __releases(lock->wait_lock)
+{
+ lock->owner = NULL;
+ raw_spin_unlock(&lock->wait_lock);
+ return true;
+}
#endif
/*
*/
int max_lock_depth = 1024;
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
/*
* Adjust the priority chain. Also used for deadlock detection.
* Decreases task's usage by one - may thus free the task.
+ *
+ * @task: the task owning the mutex (owner) for which a chain walk is
+ * probably needed
+ * @deadlock_detect: do we have to carry out deadlock detection?
+ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+ * things for a task that has just got its priority adjusted, and
+ * is waiting on a mutex)
+ * @next_lock: the mutex on which the owner of @orig_lock was blocked before
+ * we dropped its pi_lock. Is never dereferenced, only used for
+ * comparison to detect lock chain changes.
+ * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
+ * its priority to the mutex owner (can be NULL in the case
+ *		  depicted above or if the top waiter has gone away and we are
+ * actually deboosting the owner)
+ * @top_task: the current top waiter
+ *
* Returns 0 or -EDEADLK.
*/
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
int deadlock_detect,
struct rt_mutex *orig_lock,
+ struct rt_mutex *next_lock,
struct rt_mutex_waiter *orig_waiter,
struct task_struct *top_task)
{
}
put_task_struct(task);
- return deadlock_detect ? -EDEADLK : 0;
+ return -EDEADLK;
}
retry:
/*
if (orig_waiter && !rt_mutex_owner(orig_lock))
goto out_unlock_pi;
+ /*
+ * We dropped all locks after taking a refcount on @task, so
+ * the task might have moved on in the lock chain or even left
+ * the chain completely and blocks now on an unrelated lock or
+ * on @orig_lock.
+ *
+ * We stored the lock on which @task was blocked in @next_lock,
+ * so we can detect the chain change.
+ */
+ if (next_lock != waiter->lock)
+ goto out_unlock_pi;
+
/*
* Drop out, when the task has no waiters. Note,
* top_waiter can be NULL, when we are in the deboosting
* mode!
*/
- if (top_waiter && (!task_has_pi_waiters(task) ||
- top_waiter != task_top_pi_waiter(task)))
- goto out_unlock_pi;
+ if (top_waiter) {
+ if (!task_has_pi_waiters(task))
+ goto out_unlock_pi;
+ /*
+ * If deadlock detection is off, we stop here if we
+ * are not the top pi waiter of the task.
+ */
+ if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+ goto out_unlock_pi;
+ }
/*
* When deadlock detection is off then we check, if further
goto retry;
}
- /* Deadlock detection */
+ /*
+ * Deadlock detection. If the lock is the same as the original
+ * lock which caused us to walk the lock chain or if the
+ * current lock is owned by the task which initiated the chain
+ * walk, we detected a deadlock.
+ */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
raw_spin_unlock(&lock->wait_lock);
- ret = deadlock_detect ? -EDEADLK : 0;
+ ret = -EDEADLK;
goto out_unlock_pi;
}
__rt_mutex_adjust_prio(task);
}
+ /*
+ * Check whether the task which owns the current lock is pi
+ * blocked itself. If yes we store a pointer to the lock for
+ * the lock chain change detection above. After we dropped
+ * task->pi_lock next_lock cannot be dereferenced anymore.
+ */
+ next_lock = task_blocked_on_lock(task);
+
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
+ /*
+ * We reached the end of the lock chain. Stop right here. No
+	 * point in going back just to figure that out.
+ */
+ if (!next_lock)
+ goto out_put_task;
+
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
{
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex_waiter *top_waiter = waiter;
- unsigned long flags;
+ struct rt_mutex *next_lock;
int chain_walk = 0, res;
+ unsigned long flags;
+
+ /*
+ * Early deadlock detection. We really don't want the task to
+ * enqueue on itself just to untangle the mess later. It's not
+ * only an optimization. We drop the locks, so another waiter
+ * can come in before the chain walk detects the deadlock. So
+ * the other will detect the deadlock and return -EDEADLOCK,
+ * which is wrong, as the other waiter is not in a deadlock
+ * situation.
+ */
+ if (owner == task)
+ return -EDEADLK;
raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
if (!owner)
return 0;
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
- }
- else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+ } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
chain_walk = 1;
+ }
+
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
- if (!chain_walk)
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ /*
+ * Even if full deadlock detection is on, if the owner is not
+ * blocked itself, we can avoid finding this out in the chain
+ * walk.
+ */
+ if (!chain_walk || !next_lock)
return 0;
/*
raw_spin_unlock(&lock->wait_lock);
- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
- task);
+ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+ next_lock, waiter, task);
raw_spin_lock(&lock->wait_lock);
/*
* Wake up the next waiter on the lock.
*
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current task's pi waiter list and
+ * wake it up.
*
* Called with lock->wait_lock held.
*/
*/
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
- rt_mutex_set_owner(lock, NULL);
+ /*
+ * As we are waking up the top waiter, and the waiter stays
+ * queued on the lock until it gets the lock, this lock
+ * obviously has waiters. Just set the bit here and this has
+ * the added benefit of forcing all new tasks into the
+ * slow path making sure no task of lower priority than
+ * the top waiter can steal this lock.
+ */
+ lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ /*
+ * It's safe to dereference waiter as it cannot go away as
+ * long as we hold lock->wait_lock. The waiter task needs to
+ * acquire it in order to dequeue the waiter.
+ */
wake_up_process(waiter->task);
}
{
int first = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex *next_lock = NULL;
unsigned long flags;
- int chain_walk = 0;
	raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
}
__rt_mutex_adjust_prio(owner);
- if (owner->pi_blocked_on)
- chain_walk = 1;
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
- if (!chain_walk)
+ if (!next_lock)
return;
/* gets dropped in rt_mutex_adjust_prio_chain()! */
raw_spin_unlock(&lock->wait_lock);
- rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+ rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
raw_spin_lock(&lock->wait_lock);
}
void rt_mutex_adjust_pi(struct task_struct *task)
{
struct rt_mutex_waiter *waiter;
+ struct rt_mutex *next_lock;
unsigned long flags;
raw_spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
-
+ next_lock = waiter->lock;
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
- rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+ rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
}
/**
return ret;
}
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ struct rt_mutex_waiter *w)
+{
+ /*
+ * If the result is not -EDEADLOCK or the caller requested
+ * deadlock detection, nothing to do here.
+ */
+ if (res != -EDEADLOCK || detect_deadlock)
+ return;
+
+ /*
+	 * Yell loudly and stop the task right here.
+ */
+ rt_mutex_print_deadlock(w);
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+}
+
/*
* Slow path lock function:
*/
set_current_state(TASK_RUNNING);
- if (unlikely(ret))
+ if (unlikely(ret)) {
remove_waiter(lock, &waiter);
+ rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+ }
/*
* try_to_take_rt_mutex() sets the waiter bit
rt_mutex_deadlock_account_unlock(current);
- if (!rt_mutex_has_waiters(lock)) {
- lock->owner = NULL;
- raw_spin_unlock(&lock->wait_lock);
- return;
+ /*
+ * We must be careful here if the fast path is enabled. If we
+ * have no waiters queued we cannot set owner to NULL here
+ * because of:
+ *
+ * foo->lock->owner = NULL;
+ * rtmutex_lock(foo->lock); <- fast path
+ * free = atomic_dec_and_test(foo->refcnt);
+ * rtmutex_unlock(foo->lock); <- fast path
+ * if (free)
+ * kfree(foo);
+ * raw_spin_unlock(foo->lock->wait_lock);
+ *
+ * So for the fastpath enabled kernel:
+ *
+ * Nothing can set the waiters bit as long as we hold
+ * lock->wait_lock. So we do the following sequence:
+ *
+ * owner = rt_mutex_owner(lock);
+ * clear_rt_mutex_waiters(lock);
+ * raw_spin_unlock(&lock->wait_lock);
+ * if (cmpxchg(&lock->owner, owner, 0) == owner)
+ * return;
+ * goto retry;
+ *
+ * The fastpath disabled variant is simple as all access to
+ * lock->owner is serialized by lock->wait_lock:
+ *
+ * lock->owner = NULL;
+ * raw_spin_unlock(&lock->wait_lock);
+ */
+ while (!rt_mutex_has_waiters(lock)) {
+ /* Drops lock->wait_lock ! */
+ if (unlock_rt_mutex_safe(lock) == true)
+ return;
+ /* Relock the rtmutex and try again */
+ raw_spin_lock(&lock->wait_lock);
}
+ /*
+ * The wakeup next waiter path does not suffer from the above
+ * race. See the comments there.
+ */
wakeup_next_waiter(lock);
raw_spin_unlock(&lock->wait_lock);
return 1;
}
- ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
if (ret && !rt_mutex_owner(lock)) {
/*
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
#define debug_rt_mutex_detect_deadlock(w,d) (d)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+ WARN(1, "rtmutex deadlock detected\n");
+}
int idx = 0;
int task_pri = convert_prio(p->prio);
- if (task_pri >= MAX_RT_PRIO)
- return 0;
+ BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
for (idx = 0; idx < task_pri; idx++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
bit = find_last_bit(&mask, BITS_PER_LONG);
- mask = (1 << bit) - 1;
+ mask = (1UL << bit) - 1;
expires_limit = expires_limit & ~(mask);
ftrace_process_locs(mod, start, end);
}
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
{
- struct module *mod = data;
-
- if (val == MODULE_STATE_COMING)
- ftrace_init_module(mod, mod->ftrace_callsites,
- mod->ftrace_callsites +
- mod->num_ftrace_callsites);
- return 0;
+ ftrace_init_module(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites +
+ mod->num_ftrace_callsites);
}
static int ftrace_module_notify_exit(struct notifier_block *self,
return 0;
}
#else
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
static int ftrace_module_notify_exit(struct notifier_block *self,
unsigned long val, void *data)
{
}
#endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_enter_nb = {
- .notifier_call = ftrace_module_notify_enter,
- .priority = INT_MAX, /* Run before anything that can use kprobes */
-};
-
struct notifier_block ftrace_module_exit_nb = {
.notifier_call = ftrace_module_notify_exit,
.priority = INT_MIN, /* Run after anything that can remove kprobes */
__start_mcount_loc,
__stop_mcount_loc);
- ret = register_module_notifier(&ftrace_module_enter_nb);
- if (ret)
- pr_warning("Failed to register trace ftrace module enter notifier\n");
-
ret = register_module_notifier(&ftrace_module_exit_nb);
if (ret)
pr_warning("Failed to register trace ftrace module exit notifier\n");
struct tp_module *tp_mod, *iter;
int ret = 0;
+ if (!mod->num_tracepoints)
+ return 0;
+
/*
* We skip modules that taint the kernel, especially those with different
* module headers (for forced load), to make sure we don't cause a crash.
{
struct tp_module *pos;
+ if (!mod->num_tracepoints)
+ return 0;
+
mutex_lock(&tracepoints_mutex);
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
*/
#ifdef STATIC
-#include "lzo/lzo1x_decompress.c"
+#include "lzo/lzo1x_decompress_safe.c"
#else
#include <linux/decompress/unlzo.h>
#endif
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
/* if already at the top layer, we need to grow */
- if (id >= 1 << (idp->layers * IDR_BITS)) {
+ if (id > idr_max(idp->layers)) {
*starting_id = id;
return IDR_NEED_TO_GROW;
}
if (!p)
return ERR_PTR(-EINVAL);
- n = (p->layer+1) * IDR_BITS;
-
id &= MAX_ID_MASK;
- if (id >= (1 << n))
+ if (id > idr_max(p->layer + 1))
return ERR_PTR(-EINVAL);
- n -= IDR_BITS;
+ n = p->layer * IDR_BITS;
while ((n > 0) && p) {
p = p->ary[(id >> n) & IDR_MASK];
n -= IDR_BITS;
lzo_compress-objs := lzo1x_compress.o
-lzo_decompress-objs := lzo1x_decompress.o
+lzo_decompress-objs := lzo1x_decompress_safe.o
obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o
/*
- * LZO1X Compressor from MiniLZO
+ * LZO1X Compressor from LZO
*
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
- * Changed for kernel use by:
+ * Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lzo.h>
#include <asm/unaligned.h>
+#include <linux/lzo.h>
#include "lzodefs.h"
static noinline size_t
-_lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len, void *wrkmem)
+lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ size_t ti, void *wrkmem)
{
+ const unsigned char *ip;
+ unsigned char *op;
const unsigned char * const in_end = in + in_len;
- const unsigned char * const ip_end = in + in_len - M2_MAX_LEN - 5;
- const unsigned char ** const dict = wrkmem;
- const unsigned char *ip = in, *ii = ip;
- const unsigned char *end, *m, *m_pos;
- size_t m_off, m_len, dindex;
- unsigned char *op = out;
+ const unsigned char * const ip_end = in + in_len - 20;
+ const unsigned char *ii;
+ lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
- ip += 4;
+ op = out;
+ ip = in;
+ ii = ip;
+ ip += ti < 4 ? 4 - ti : 0;
for (;;) {
- dindex = ((size_t)(0x21 * DX3(ip, 5, 5, 6)) >> 5) & D_MASK;
- m_pos = dict[dindex];
-
- if (m_pos < in)
- goto literal;
-
- if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET))
- goto literal;
-
- m_off = ip - m_pos;
- if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
- goto try_match;
-
- dindex = (dindex & (D_MASK & 0x7ff)) ^ (D_HIGH | 0x1f);
- m_pos = dict[dindex];
-
- if (m_pos < in)
- goto literal;
-
- if (ip == m_pos || ((size_t)(ip - m_pos) > M4_MAX_OFFSET))
- goto literal;
-
- m_off = ip - m_pos;
- if (m_off <= M2_MAX_OFFSET || m_pos[3] == ip[3])
- goto try_match;
-
- goto literal;
-
-try_match:
- if (get_unaligned((const unsigned short *)m_pos)
- == get_unaligned((const unsigned short *)ip)) {
- if (likely(m_pos[2] == ip[2]))
- goto match;
- }
-
+ const unsigned char *m_pos;
+ size_t t, m_len, m_off;
+ u32 dv;
literal:
- dict[dindex] = ip;
- ++ip;
+ ip += 1 + ((ip - ii) >> 5);
+next:
if (unlikely(ip >= ip_end))
break;
- continue;
-
-match:
- dict[dindex] = ip;
- if (ip != ii) {
- size_t t = ip - ii;
+ dv = get_unaligned_le32(ip);
+ t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+ m_pos = in + dict[t];
+ dict[t] = (lzo_dict_t) (ip - in);
+ if (unlikely(dv != get_unaligned_le32(m_pos)))
+ goto literal;
+ ii -= ti;
+ ti = 0;
+ t = ip - ii;
+ if (t != 0) {
if (t <= 3) {
op[-2] |= t;
- } else if (t <= 18) {
+ COPY4(op, ii);
+ op += t;
+ } else if (t <= 16) {
*op++ = (t - 3);
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += t;
} else {
- size_t tt = t - 18;
-
- *op++ = 0;
- while (tt > 255) {
- tt -= 255;
+ if (t <= 18) {
+ *op++ = (t - 3);
+ } else {
+ size_t tt = t - 18;
*op++ = 0;
+ while (unlikely(tt > 255)) {
+ tt -= 255;
+ *op++ = 0;
+ }
+ *op++ = tt;
}
- *op++ = tt;
+ do {
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += 16;
+ ii += 16;
+ t -= 16;
+ } while (t >= 16);
+ if (t > 0) do {
+ *op++ = *ii++;
+ } while (--t > 0);
}
- do {
- *op++ = *ii++;
- } while (--t > 0);
}
- ip += 3;
- if (m_pos[3] != *ip++ || m_pos[4] != *ip++
- || m_pos[5] != *ip++ || m_pos[6] != *ip++
- || m_pos[7] != *ip++ || m_pos[8] != *ip++) {
- --ip;
- m_len = ip - ii;
+ m_len = 4;
+ {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
+ u64 v;
+ v = get_unaligned((const u64 *) (ip + m_len)) ^
+ get_unaligned((const u64 *) (m_pos + m_len));
+ if (unlikely(v == 0)) {
+ do {
+ m_len += 8;
+ v = get_unaligned((const u64 *) (ip + m_len)) ^
+ get_unaligned((const u64 *) (m_pos + m_len));
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (v == 0);
+ }
+# if defined(__LITTLE_ENDIAN)
+ m_len += (unsigned) __builtin_ctzll(v) / 8;
+# elif defined(__BIG_ENDIAN)
+ m_len += (unsigned) __builtin_clzll(v) / 8;
+# else
+# error "missing endian definition"
+# endif
+#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
+ u32 v;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (unlikely(v == 0)) {
+ do {
+ m_len += 4;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (v != 0)
+ break;
+ m_len += 4;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (v == 0);
+ }
+# if defined(__LITTLE_ENDIAN)
+ m_len += (unsigned) __builtin_ctz(v) / 8;
+# elif defined(__BIG_ENDIAN)
+ m_len += (unsigned) __builtin_clz(v) / 8;
+# else
+# error "missing endian definition"
+# endif
+#else
+ if (unlikely(ip[m_len] == m_pos[m_len])) {
+ do {
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (ip[m_len] == m_pos[m_len]);
+ }
+#endif
+ }
+m_len_done:
- if (m_off <= M2_MAX_OFFSET) {
- m_off -= 1;
- *op++ = (((m_len - 1) << 5)
- | ((m_off & 7) << 2));
- *op++ = (m_off >> 3);
- } else if (m_off <= M3_MAX_OFFSET) {
- m_off -= 1;
+ m_off = ip - m_pos;
+ ip += m_len;
+ ii = ip;
+ if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
+ m_off -= 1;
+ *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
+ *op++ = (m_off >> 3);
+ } else if (m_off <= M3_MAX_OFFSET) {
+ m_off -= 1;
+ if (m_len <= M3_MAX_LEN)
*op++ = (M3_MARKER | (m_len - 2));
- goto m3_m4_offset;
- } else {
- m_off -= 0x4000;
-
- *op++ = (M4_MARKER | ((m_off & 0x4000) >> 11)
- | (m_len - 2));
- goto m3_m4_offset;
+ else {
+ m_len -= M3_MAX_LEN;
+ *op++ = M3_MARKER | 0;
+ while (unlikely(m_len > 255)) {
+ m_len -= 255;
+ *op++ = 0;
+ }
+ *op++ = (m_len);
}
+ *op++ = (m_off << 2);
+ *op++ = (m_off >> 6);
} else {
- end = in_end;
- m = m_pos + M2_MAX_LEN + 1;
-
- while (ip < end && *m == *ip) {
- m++;
- ip++;
- }
- m_len = ip - ii;
-
- if (m_off <= M3_MAX_OFFSET) {
- m_off -= 1;
- if (m_len <= 33) {
- *op++ = (M3_MARKER | (m_len - 2));
- } else {
- m_len -= 33;
- *op++ = M3_MARKER | 0;
- goto m3_m4_len;
- }
- } else {
- m_off -= 0x4000;
- if (m_len <= M4_MAX_LEN) {
- *op++ = (M4_MARKER
- | ((m_off & 0x4000) >> 11)
+ m_off -= 0x4000;
+ if (m_len <= M4_MAX_LEN)
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
- } else {
- m_len -= M4_MAX_LEN;
- *op++ = (M4_MARKER
- | ((m_off & 0x4000) >> 11));
-m3_m4_len:
- while (m_len > 255) {
- m_len -= 255;
- *op++ = 0;
- }
-
- *op++ = (m_len);
+ else {
+ m_len -= M4_MAX_LEN;
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8));
+ while (unlikely(m_len > 255)) {
+ m_len -= 255;
+ *op++ = 0;
}
+ *op++ = (m_len);
}
-m3_m4_offset:
- *op++ = ((m_off & 63) << 2);
+ *op++ = (m_off << 2);
*op++ = (m_off >> 6);
}
-
- ii = ip;
- if (unlikely(ip >= ip_end))
- break;
+ goto next;
}
-
*out_len = op - out;
- return in_end - ii;
+ return in_end - (ii - ti);
}
-int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out,
- size_t *out_len, void *wrkmem)
+int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
{
- const unsigned char *ii;
+ const unsigned char *ip = in;
unsigned char *op = out;
- size_t t;
+ size_t l = in_len;
+ size_t t = 0;
- if (unlikely(in_len <= M2_MAX_LEN + 5)) {
- t = in_len;
- } else {
- t = _lzo1x_1_do_compress(in, in_len, op, out_len, wrkmem);
+ while (l > 20) {
+ size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
+ uintptr_t ll_end = (uintptr_t) ip + ll;
+ if ((ll_end + ((t + ll) >> 5)) <= ll_end)
+ break;
+ BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
+ memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
+ t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+ ip += ll;
op += *out_len;
+ l -= ll;
}
+ t += l;
if (t > 0) {
- ii = in + in_len - t;
+ const unsigned char *ii = in + in_len - t;
if (op == out && t <= 238) {
*op++ = (17 + t);
*op++ = (t - 3);
} else {
size_t tt = t - 18;
-
*op++ = 0;
while (tt > 255) {
tt -= 255;
*op++ = 0;
}
-
*op++ = tt;
}
- do {
+ if (t >= 16) do {
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += 16;
+ ii += 16;
+ t -= 16;
+ } while (t >= 16);
+ if (t > 0) do {
*op++ = *ii++;
} while (--t > 0);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
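A hedged usage sketch of the rewritten compressor entry point; sizing the output buffer with lzo1x_worst_compress() and supplying the scratch area are the caller's responsibility (illustrative kernel-context snippet):

	unsigned char *wrkmem = kmalloc(LZO1X_1_MEM_COMPRESS, GFP_KERNEL);
	size_t out_len;	/* set by the call; out must hold lzo1x_worst_compress(in_len) bytes */
	int err;

	if (!wrkmem)
		return -ENOMEM;
	err = lzo1x_1_compress(in, in_len, out, &out_len, wrkmem);
	kfree(wrkmem);
	if (err == LZO_E_OK)
		pr_debug("compressed %zu -> %zu bytes\n", in_len, out_len);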
-
+++ /dev/null
-/*
- * LZO1X Decompressor from MiniLZO
- *
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
- *
- * The full LZO package can be found at:
- * http://www.oberhumer.com/opensource/lzo/
- *
- * Changed for kernel use by:
- * Nitin Gupta <nitingupta910@gmail.com>
- * Richard Purdie <rpurdie@openedhand.com>
- */
-
-#ifndef STATIC
-#include <linux/module.h>
-#include <linux/kernel.h>
-#endif
-
-#include <asm/unaligned.h>
-#include <linux/lzo.h>
-#include "lzodefs.h"
-
-#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
-#define HAVE_OP(x, op_end, op) ((size_t)(op_end - op) < (x))
-#define HAVE_LB(m_pos, out, op) (m_pos < out || m_pos >= op)
-
-#define COPY4(dst, src) \
- put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
-
-int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
- unsigned char *out, size_t *out_len)
-{
- const unsigned char * const ip_end = in + in_len;
- unsigned char * const op_end = out + *out_len;
- const unsigned char *ip = in, *m_pos;
- unsigned char *op = out;
- size_t t;
-
- *out_len = 0;
-
- if (*ip > 17) {
- t = *ip++ - 17;
- if (t < 4)
- goto match_next;
- if (HAVE_OP(t, op_end, op))
- goto output_overrun;
- if (HAVE_IP(t + 1, ip_end, ip))
- goto input_overrun;
- do {
- *op++ = *ip++;
- } while (--t > 0);
- goto first_literal_run;
- }
-
- while ((ip < ip_end)) {
- t = *ip++;
- if (t >= 16)
- goto match;
- if (t == 0) {
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- while (*ip == 0) {
- t += 255;
- ip++;
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- }
- t += 15 + *ip++;
- }
- if (HAVE_OP(t + 3, op_end, op))
- goto output_overrun;
- if (HAVE_IP(t + 4, ip_end, ip))
- goto input_overrun;
-
- COPY4(op, ip);
- op += 4;
- ip += 4;
- if (--t > 0) {
- if (t >= 4) {
- do {
- COPY4(op, ip);
- op += 4;
- ip += 4;
- t -= 4;
- } while (t >= 4);
- if (t > 0) {
- do {
- *op++ = *ip++;
- } while (--t > 0);
- }
- } else {
- do {
- *op++ = *ip++;
- } while (--t > 0);
- }
- }
-
-first_literal_run:
- t = *ip++;
- if (t >= 16)
- goto match;
- m_pos = op - (1 + M2_MAX_OFFSET);
- m_pos -= t >> 2;
- m_pos -= *ip++ << 2;
-
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
-
- if (HAVE_OP(3, op_end, op))
- goto output_overrun;
- *op++ = *m_pos++;
- *op++ = *m_pos++;
- *op++ = *m_pos;
-
- goto match_done;
-
- do {
-match:
- if (t >= 64) {
- m_pos = op - 1;
- m_pos -= (t >> 2) & 7;
- m_pos -= *ip++ << 3;
- t = (t >> 5) - 1;
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
- if (HAVE_OP(t + 3 - 1, op_end, op))
- goto output_overrun;
- goto copy_match;
- } else if (t >= 32) {
- t &= 31;
- if (t == 0) {
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- while (*ip == 0) {
- t += 255;
- ip++;
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- }
- t += 31 + *ip++;
- }
- m_pos = op - 1;
- m_pos -= get_unaligned_le16(ip) >> 2;
- ip += 2;
- } else if (t >= 16) {
- m_pos = op;
- m_pos -= (t & 8) << 11;
-
- t &= 7;
- if (t == 0) {
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- while (*ip == 0) {
- t += 255;
- ip++;
- if (HAVE_IP(1, ip_end, ip))
- goto input_overrun;
- }
- t += 7 + *ip++;
- }
- m_pos -= get_unaligned_le16(ip) >> 2;
- ip += 2;
- if (m_pos == op)
- goto eof_found;
- m_pos -= 0x4000;
- } else {
- m_pos = op - 1;
- m_pos -= t >> 2;
- m_pos -= *ip++ << 2;
-
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
- if (HAVE_OP(2, op_end, op))
- goto output_overrun;
-
- *op++ = *m_pos++;
- *op++ = *m_pos;
- goto match_done;
- }
-
- if (HAVE_LB(m_pos, out, op))
- goto lookbehind_overrun;
- if (HAVE_OP(t + 3 - 1, op_end, op))
- goto output_overrun;
-
- if (t >= 2 * 4 - (3 - 1) && (op - m_pos) >= 4) {
- COPY4(op, m_pos);
- op += 4;
- m_pos += 4;
- t -= 4 - (3 - 1);
- do {
- COPY4(op, m_pos);
- op += 4;
- m_pos += 4;
- t -= 4;
- } while (t >= 4);
- if (t > 0)
- do {
- *op++ = *m_pos++;
- } while (--t > 0);
- } else {
-copy_match:
- *op++ = *m_pos++;
- *op++ = *m_pos++;
- do {
- *op++ = *m_pos++;
- } while (--t > 0);
- }
-match_done:
- t = ip[-2] & 3;
- if (t == 0)
- break;
-match_next:
- if (HAVE_OP(t, op_end, op))
- goto output_overrun;
- if (HAVE_IP(t + 1, ip_end, ip))
- goto input_overrun;
-
- *op++ = *ip++;
- if (t > 1) {
- *op++ = *ip++;
- if (t > 2)
- *op++ = *ip++;
- }
-
- t = *ip++;
- } while (ip < ip_end);
- }
-
- *out_len = op - out;
- return LZO_E_EOF_NOT_FOUND;
-
-eof_found:
- *out_len = op - out;
- return (ip == ip_end ? LZO_E_OK :
- (ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));
-input_overrun:
- *out_len = op - out;
- return LZO_E_INPUT_OVERRUN;
-
-output_overrun:
- *out_len = op - out;
- return LZO_E_OUTPUT_OVERRUN;
-
-lookbehind_overrun:
- *out_len = op - out;
- return LZO_E_LOOKBEHIND_OVERRUN;
-}
-#ifndef STATIC
-EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LZO1X Decompressor");
-
-#endif
--- /dev/null
+/*
+ * LZO1X Decompressor from LZO
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#ifndef STATIC
+#include <linux/module.h>
+#include <linux/kernel.h>
+#endif
+#include <asm/unaligned.h>
+#include <linux/lzo.h>
+#include "lzodefs.h"
+
+#define HAVE_IP(t, x) \
+ (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x) \
+ (((size_t)(op_end - op) >= (size_t)(t + x)) && \
+ (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x) \
+ do { \
+ if (!HAVE_IP(t, x)) \
+ goto input_overrun; \
+ } while (0)
+
+#define NEED_OP(t, x) \
+ do { \
+ if (!HAVE_OP(t, x)) \
+ goto output_overrun; \
+ } while (0)
+
+#define TEST_LB(m_pos) \
+ do { \
+ if ((m_pos) < out) \
+ goto lookbehind_overrun; \
+ } while (0)
+
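The extra `(t + x) >= t` and `(t + x) >= x` clauses guard the size_t addition against wraparound; without them a huge `t` could wrap the sum past the remaining-bytes comparison. A standalone illustration with hypothetical values (not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t remaining = 16;
	size_t t = SIZE_MAX - 4, x = 15;	/* t + x wraps around to 10 */

	/* naive check: the wrapped sum looks small, so it passes */
	printf("naive accepts: %d\n", remaining >= t + x);
	/* hardened check, as in HAVE_IP()/HAVE_OP(): also reject wraparound */
	printf("safe accepts:  %d\n",
	       remaining >= t + x && t + x >= t && t + x >= x);
	return 0;
}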
+int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+{
+ unsigned char *op;
+ const unsigned char *ip;
+ size_t t, next;
+ size_t state = 0;
+ const unsigned char *m_pos;
+ const unsigned char * const ip_end = in + in_len;
+ unsigned char * const op_end = out + *out_len;
+
+ op = out;
+ ip = in;
+
+ if (unlikely(in_len < 3))
+ goto input_overrun;
+ if (*ip > 17) {
+ t = *ip++ - 17;
+ if (t < 4) {
+ next = t;
+ goto match_next;
+ }
+ goto copy_literal_run;
+ }
+
+ for (;;) {
+ t = *ip++;
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
+ while (unlikely(*ip == 0)) {
+ t += 255;
+ ip++;
+ NEED_IP(1, 0);
+ }
+ t += 15 + *ip++;
+ }
+ t += 3;
+copy_literal_run:
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+ COPY8(op, ip);
+ op += 8;
+ ip += 8;
+ COPY8(op, ip);
+ op += 8;
+ ip += 8;
+ } while (ip < ie);
+ ip = ie;
+ op = oe;
+ } else
+#endif
+ {
+ NEED_OP(t, 0);
+ NEED_IP(t, 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+ }
+ state = 4;
+ continue;
+ } else if (state != 4) {
+ next = t & 3;
+ m_pos = op - 1;
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+ NEED_OP(2, 0);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+ goto match_next;
+ } else {
+ next = t & 3;
+ m_pos = op - (1 + M2_MAX_OFFSET);
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ t = 3;
+ }
+ } else if (t >= 64) {
+ next = t & 3;
+ m_pos = op - 1;
+ m_pos -= (t >> 2) & 7;
+ m_pos -= *ip++ << 3;
+ t = (t >> 5) - 1 + (3 - 1);
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
+ while (unlikely(*ip == 0)) {
+ t += 255;
+ ip++;
+ NEED_IP(1, 0);
+ }
+ t += 31 + *ip++;
+ NEED_IP(2, 0);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ } else {
+ m_pos = op;
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
+ while (unlikely(*ip == 0)) {
+ t += 255;
+ ip++;
+ NEED_IP(1, 0);
+ }
+ t += 7 + *ip++;
+ NEED_IP(2, 0);
+ }
+ next = get_unaligned_le16(ip);
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ if (m_pos == op)
+ goto eof_found;
+ m_pos -= 0x4000;
+ }
+ TEST_LB(m_pos);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+ if (likely(HAVE_OP(t, 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+ m_pos += 8;
+ COPY8(op, m_pos);
+ op += 8;
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+ if (HAVE_IP(6, 0)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+ ip += next;
+ continue;
+ }
+ } else {
+ NEED_OP(t, 0);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+ }
+ } else
+#endif
+ {
+ unsigned char *oe = op + t;
+ NEED_OP(t, 0);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+ m_pos += 2;
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+ }
+match_next:
+ state = next;
+ t = next;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+#endif
+ {
+ NEED_IP(t, 3);
+ NEED_OP(t, 0);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
+ }
+ }
+ }
+
+eof_found:
+ *out_len = op - out;
+ return (t != 3 ? LZO_E_ERROR :
+ ip == ip_end ? LZO_E_OK :
+ ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);
+
+input_overrun:
+ *out_len = op - out;
+ return LZO_E_INPUT_OVERRUN;
+
+output_overrun:
+ *out_len = op - out;
+ return LZO_E_OUTPUT_OVERRUN;
+
+lookbehind_overrun:
+ *out_len = op - out;
+ return LZO_E_LOOKBEHIND_OVERRUN;
+}
+#ifndef STATIC
+EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZO1X Decompressor");
+
+#endif
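And a hedged usage sketch of the safe decompressor: *out_len carries the destination capacity in and the number of bytes produced out (illustrative):

	size_t out_len = dst_size;	/* in: capacity of dst, out: bytes written */
	int err = lzo1x_decompress_safe(src, src_len, dst, &out_len);

	if (err != LZO_E_OK)
		pr_err("lzo: corrupt or truncated input (err %d)\n", err);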
/*
* lzodefs.h -- architecture, OS and compiler specific defines
*
- * Copyright (C) 1996-2005 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
- * Changed for kernel use by:
+ * Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
-#define LZO_VERSION 0x2020
-#define LZO_VERSION_STRING "2.02"
-#define LZO_VERSION_DATE "Oct 17 2005"
+
+#define COPY4(dst, src) \
+ put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
+#if defined(__x86_64__)
+#define COPY8(dst, src) \
+ put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
+#else
+#define COPY8(dst, src) \
+ COPY4(dst, src); COPY4((dst) + 4, (src) + 4)
+#endif
+
+#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+#error "conflicting endian definitions"
+#elif defined(__x86_64__)
+#define LZO_USE_CTZ64 1
+#define LZO_USE_CTZ32 1
+#elif defined(__i386__) || defined(__powerpc__)
+#define LZO_USE_CTZ32 1
+#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5)
+#define LZO_USE_CTZ32 1
+#endif
#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MARKER 32
#define M4_MARKER 16
-#define D_BITS 14
-#define D_MASK ((1u << D_BITS) - 1)
+#define lzo_dict_t unsigned short
+#define D_BITS 13
+#define D_SIZE (1u << D_BITS)
+#define D_MASK (D_SIZE - 1)
#define D_HIGH ((D_MASK >> 1) + 1)
-
-#define DX2(p, s1, s2) (((((size_t)((p)[2]) << (s2)) ^ (p)[1]) \
- << (s1)) ^ (p)[0])
-#define DX3(p, s1, s2, s3) ((DX2((p)+1, s2, s3) << (s1)) ^ (p)[0])
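With D_BITS cut from 14 to 13, the dictionary is D_SIZE = 1 << 13 = 8192 slots of lzo_dict_t (u16), i.e. 16 KiB; the BUILD_BUG_ON() in lzo1x_1_compress() above checks exactly this product against LZO1X_1_MEM_COMPRESS, so the new scratch layout cannot silently outgrow the advertised workspace size.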
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/sched.h>
#include <linux/types.h>
#include <net/netlink.h>
}
if (unlikely(rem > 0))
- printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
- "attributes.\n", rem);
+ pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+ rem, current->comm);
err = 0;
errout:
{
unsigned long addr = (unsigned long)vaddr;
- if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+ if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
return pte_page(pkmap_page_table[i]);
}
while (nr_pages--) {
if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
break;
+ cond_resched_lock(&hugetlb_lock);
}
}
update_mmu_cache(vma, address, ptep);
}
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+ swp_entry_t swp;
+
+ if (huge_pte_none(pte) || pte_present(pte))
+ return 0;
+ swp = pte_to_swp_entry(pte);
+ if (non_swap_entry(swp) && is_migration_entry(swp))
+ return 1;
+ else
+ return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+ swp_entry_t swp;
+
+ if (huge_pte_none(pte) || pte_present(pte))
+ return 0;
+ swp = pte_to_swp_entry(pte);
+ if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+ return 1;
+ else
+ return 0;
+}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
spin_lock(&dst->page_table_lock);
spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
- if (!huge_pte_none(huge_ptep_get(src_pte))) {
+ entry = huge_ptep_get(src_pte);
+ if (huge_pte_none(entry)) { /* skip none entry */
+ ;
+ } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+ is_hugetlb_entry_hwpoisoned(entry))) {
+ swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+ if (is_write_migration_entry(swp_entry) && cow) {
+ /*
+ * COW mappings require pages in both
+ * parent and child to be set to read.
+ */
+ make_migration_entry_read(&swp_entry);
+ entry = swp_entry_to_pte(swp_entry);
+ set_huge_pte_at(src, addr, src_pte, entry);
+ }
+ set_huge_pte_at(dst, addr, dst_pte, entry);
+ } else {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
- entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage);
return -ENOMEM;
}
-static int is_hugetlb_entry_migration(pte_t pte)
-{
- swp_entry_t swp;
-
- if (huge_pte_none(pte) || pte_present(pte))
- return 0;
- swp = pte_to_swp_entry(pte);
- if (non_swap_entry(swp) && is_migration_entry(swp))
- return 1;
- else
- return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
- swp_entry_t swp;
-
- if (huge_pte_none(pte) || pte_present(pte))
- return 0;
- swp = pte_to_swp_entry(pte);
- if (non_swap_entry(swp) && is_hwpoison_entry(swp))
- return 1;
- else
- return 0;
-}
-
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
return 0;
} else if (PageHuge(hpage)) {
/*
- * Check "just unpoisoned", "filter hit", and
- * "race with other subpage."
+ * Check "filter hit" and "race with other subpage."
*/
lock_page(hpage);
- if (!PageHWPoison(hpage)
- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != hpage && TestSetPageHWPoison(hpage))) {
- atomic_long_sub(nr_pages, &mce_bad_pages);
- return 0;
+ if (PageHWPoison(hpage)) {
+ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+ atomic_long_sub(nr_pages, &mce_bad_pages);
+ unlock_page(hpage);
+ return 0;
+ }
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+ atomic_long_sub(nr_pages, &mce_bad_pages);
+ put_page(hpage);
res = 0;
goto out;
}
unsigned long address, unsigned int fault_flags)
{
struct vm_area_struct *vma;
+ vm_flags_t vm_flags;
int ret;
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
return -EFAULT;
+ vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+ if (!(vm_flags & vma->vm_flags))
+ return -EFAULT;
+
ret = handle_mm_fault(mm, vma, address, fault_flags);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
* If pagelist != NULL then isolate pages from the LRU and
* put them on the pagelist.
*/
-static struct vm_area_struct *
+static int
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
const nodemask_t *nodes, unsigned long flags, void *private)
{
- int err;
- struct vm_area_struct *first, *vma, *prev;
-
+ int err = 0;
+ struct vm_area_struct *vma, *prev;
- first = find_vma(mm, start);
- if (!first)
- return ERR_PTR(-EFAULT);
+ vma = find_vma(mm, start);
+ if (!vma)
+ return -EFAULT;
prev = NULL;
- for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+ for (; vma && vma->vm_start < end; vma = vma->vm_next) {
if (!(flags & MPOL_MF_DISCONTIG_OK)) {
if (!vma->vm_next && vma->vm_end < end)
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
if (prev && prev->vm_end < vma->vm_start)
- return ERR_PTR(-EFAULT);
+ return -EFAULT;
}
if (!is_vm_hugetlb_page(vma) &&
((flags & MPOL_MF_STRICT) ||
start = vma->vm_start;
err = check_pgd_range(vma, start, endvma, nodes,
flags, private);
- if (err) {
- first = ERR_PTR(err);
+ if (err)
break;
- }
}
prev = vma;
}
- return first;
+ return err;
}
/*
nodemask_t nmask;
LIST_HEAD(pagelist);
int err = 0;
- struct vm_area_struct *vma;
nodes_clear(nmask);
node_set(source, nmask);
- vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ /*
+ * This does not "check" the range but isolates all pages that
+ * need migration. Between passing in the full user address
+ * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
+ */
+ VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
+ check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest,
/*
* Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
* Search forward from there, if not. N.B., this assumes that the
* list of pages handed to migrate_pages()--which is how we get here--
* is in virtual address order.
*/
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
{
- struct vm_area_struct *vma = (struct vm_area_struct *)private;
+ struct vm_area_struct *vma;
unsigned long uninitialized_var(address);
+ vma = find_vma(current->mm, start);
while (vma) {
address = page_address_in_vma(page, vma);
if (address != -EFAULT)
return -ENOSYS;
}
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
{
return NULL;
}
unsigned short mode, unsigned short mode_flags,
nodemask_t *nmask, unsigned long flags)
{
- struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
struct mempolicy *new;
unsigned long end;
if (err)
goto mpol_out;
- vma = check_range(mm, start, end, nmask,
+ err = check_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
-
- err = PTR_ERR(vma);
- if (!IS_ERR(vma)) {
+ if (!err) {
int nr_failed = 0;
err = mbind_range(mm, start, end, new);
if (!list_empty(&pagelist)) {
- nr_failed = migrate_pages(&pagelist, new_vma_page,
- (unsigned long)vma,
- false, true);
+ nr_failed = migrate_pages(&pagelist, new_page,
+ start, false, true);
if (nr_failed)
putback_lru_pages(&pagelist);
}
* => fast response on large errors; small oscillation near setpoint
*/
setpoint = (freerun + limit) / 2;
- x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
+ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
limit - setpoint + 1);
pos_ratio = x;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
x_intercept = bdi_setpoint + span;
if (bdi_dirty < x_intercept - span / 4) {
- pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
x_intercept - bdi_setpoint + 1);
} else
pos_ratio /= 4;
* LOCK should suffice since the actual taking of the lock must
* happen _before_ what follows.
*/
+ might_sleep();
if (mutex_is_locked(&anon_vma->root->mutex)) {
anon_vma_lock(anon_vma);
anon_vma_unlock(anon_vma);
* above cannot corrupt).
*/
if (!page_mapped(page)) {
+ rcu_read_unlock();
put_anon_vma(anon_vma);
- anon_vma = NULL;
+ return NULL;
}
out:
rcu_read_unlock();
}
if (!page_mapped(page)) {
+ rcu_read_unlock();
put_anon_vma(anon_vma);
- anon_vma = NULL;
- goto out;
+ return NULL;
}
/* we pinned the anon_vma, it's safe to sleep */
{
struct anon_vma *root = anon_vma->root;
+ anon_vma_free(anon_vma);
if (root != anon_vma && atomic_dec_and_test(&root->refcount))
anon_vma_free(root);
-
- anon_vma_free(anon_vma);
}
#ifdef CONFIG_MIGRATION
}
}
+ tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
current->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
+
return 0;
}
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
struct hci_cp_auth_requested cp;
- /* encrypt must be pending if auth is also pending */
- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
-
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
sizeof(cp), &cp);
- if (conn->key_type != 0xff)
+
+ /* If we're already encrypted set the REAUTH_PEND flag,
+ * otherwise set the ENCRYPT_PEND.
+ */
+ if (conn->link_mode & HCI_LM_ENCRYPT)
set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
+ else
+ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
}
return 0;
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
- * confirm_hint set to 1). */
- if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ * confirm_hint set to 1). The exception is if neither
+ * side had MITM in which case we do auto-accept.
+ */
+ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) &&
+ (loc_mitm || rem_mitm)) {
BT_DBG("Confirming auto-accept as acceptor");
confirm_hint = 1;
goto confirm;
return 0;
}
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ if (tb[IFLA_ADDRESS]) {
+ spin_lock_bh(&br->lock);
+ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+ spin_unlock_bh(&br->lock);
+ }
+
+ return register_netdevice(dev);
+}
+
struct rtnl_link_ops br_link_ops __read_mostly = {
.kind = "bridge",
.priv_size = sizeof(struct net_bridge),
.setup = br_dev_setup,
.validate = br_validate,
+ .newlink = br_dev_newlink,
.dellink = br_dev_delete,
};
if (repl->num_counters &&
copy_to_user(repl->counters, counterstmp,
repl->num_counters * sizeof(struct ebt_counter))) {
- ret = -EFAULT;
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
}
- else
- ret = 0;
/* decrease module count and free resources */
EBT_ENTRY_ITERATE(table->entries, table->entries_size,
return r;
}
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, bool more)
+{
+ int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
+ int ret;
+
+ ret = kernel_sendpage(sock, page, offset, size, flags);
+ if (ret == -EAGAIN)
+ ret = 0;
+
+ return ret;
+}
+
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, bool more)
+{
+ int ret;
+ struct kvec iov;
+
+ /* sendpage cannot properly handle pages with page_count == 0,
* we need to fall back to sendmsg if that's the case */
+ if (page_count(page) >= 1)
+ return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+ iov.iov_base = kmap(page) + offset;
+ iov.iov_len = size;
+ ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+ kunmap(page);
+
+ return ret;
+}
/*
* Shutdown/close the socket for the given connection.
cpu_to_le32(crc32c(tmpcrc, base, len));
con->out_msg_pos.did_page_crc = 1;
}
- ret = kernel_sendpage(con->sock, page,
+ ret = ceph_tcp_sendpage(con->sock, page,
con->out_msg_pos.page_pos + page_shift,
- len,
- MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_MORE);
+ len, 1);
if (crc &&
(msg->pages || msg->pagelist || msg->bio || in_trail))
kunmap(page);
- if (ret == -EAGAIN)
- ret = 0;
if (ret <= 0)
goto out;
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
napi->skb = skb;
}
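skb_end_offset(), used here and in the skbuff hunks below, replaces the open-coded `skb_end_pointer(skb) - skb->head`. A sketch consistent with those call sites (the offset-based layout keyed on NET_SKBUFF_DATA_USES_OFFSET is an assumption):

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	return skb->end;		/* skb->end is already an offset */
#else
	return skb->end - skb->head;	/* skb->end is a pointer */
#endif
}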
if (skb_is_nonlinear(skb))
return 0;
+ if (skb->len < sizeof(struct nlattr))
+ return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
if (skb_is_nonlinear(skb))
return 0;
+ if (skb->len < sizeof(struct nlattr))
+ return 0;
if (A > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *)&skb->data[A];
- if (nla->nla_len > A - skb->len)
+ if (nla->nla_len > skb->len - A)
return 0;
nla = nla_find_nested(nla, X);
return 0;
}
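The old test `nla->nla_len > A - skb->len` underflows whenever A < skb->len, producing a huge unsigned value that no nla_len exceeds, so oversized attributes were never rejected. A standalone illustration with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 100, A = 40, nla_len = 80;	/* overruns by 20 */

	/* old: 40 - 100 underflows to ~4e9, so nothing is rejected */
	printf("old rejects: %d\n", nla_len > A - skb_len);
	/* new: compare against the bytes actually remaining after A */
	printf("new rejects: %d\n", nla_len > skb_len - A);
	return 0;
}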
-static size_t rtnl_port_size(const struct net_device *dev)
+static size_t rtnl_port_size(const struct net_device *dev,
+ u32 ext_filter_mask)
{
size_t port_size = nla_total_size(4) /* PORT_VF */
+ nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+ !(ext_filter_mask & RTEXT_FILTER_VF))
return 0;
if (dev_num_vf(dev->dev.parent))
return port_self_size + vf_ports_size +
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
- + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
}
return 0;
}
-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
+ u32 ext_filter_mask)
{
int err;
- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
+ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
+ !(ext_filter_mask & RTEXT_FILTER_VF))
return 0;
err = rtnl_port_self_fill(skb, dev);
nla_nest_end(skb, vfinfo);
}
- if (rtnl_port_fill(skb, dev))
+ if (rtnl_port_fill(skb, dev, ext_filter_mask))
goto nla_put_failure;
if (dev->rtnl_link_ops) {
struct hlist_node *node;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
+ int err;
s_h = cb->args[0];
s_idx = cb->args[1];
hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
if (idx < s_idx)
goto cont;
- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
- NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, 0,
- NLM_F_MULTI,
- ext_filter_mask) <= 0)
+ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0,
+ NLM_F_MULTI,
+ ext_filter_mask);
+ /* If we ran out of room on the first message,
+ * we're in trouble
+ */
+ WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
+
+ if (err <= 0)
goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
* skb_clone - duplicate an sk_buff
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
- unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+ unsigned int size = skb_end_offset(skb) + skb->data_len;
struct sk_buff *n = alloc_skb(size, gfp_mask);
if (!n)
{
int i;
u8 *data;
- int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
+ int size = nhead + skb_end_offset(skb) + ntail;
long off;
bool fastpath;
if (unlikely(!nskb))
goto err;
- hsize = skb_end_pointer(nskb) - nskb->head;
+ hsize = skb_end_offset(nskb);
if (skb_cow_head(nskb, doffset + headroom)) {
kfree_skb(nskb);
goto err;
}
- nskb->truesize += skb_end_pointer(nskb) - nskb->head -
- hsize;
+ nskb->truesize += skb_end_offset(nskb) - hsize;
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
skb_put(nskb, hsize), hsize);
while (pos < offset + len && i < nfrags) {
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ goto err;
+
*frag = skb_shinfo(skb)->frags[i];
__skb_frag_ref(frag);
size = skb_frag_size(frag);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- unsigned int hdr_len;
if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
- hdr_len = tcp_hdrlen(skb);
- else
- hdr_len = sizeof(struct udphdr);
- return hdr_len + shinfo->gso_size;
+ return tcp_hdrlen(skb) + shinfo->gso_size;
+
+ /* UFO sets gso_size to the size of the fragmentation
+ * payload, i.e. the size of the L4 (UDP) header is already
+ * accounted for.
+ */
+ return shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (fi == NULL)
goto failure;
+ fib_info_cnt++;
if (cfg->fc_mx) {
fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
if (!fi->fib_metrics)
goto failure;
} else
fi->fib_metrics = (u32 *) dst_default_metrics;
- fib_info_cnt++;
fi->fib_net = hold_net(net);
fi->fib_protocol = cfg->fc_protocol;
static bool ip_may_fragment(const struct sk_buff *skb)
{
return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
- !skb->local_df;
+ skb->local_df;
}
static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
- if (skb->len <= mtu || skb->local_df)
+ if (skb->len <= mtu)
return false;
if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
module_init(ipip_init);
module_exit(ipip_fini);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ipip");
MODULE_ALIAS_NETDEV("tunl0");
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
- sizeof(struct xt_counters) * num_counters) != 0)
- ret = -EFAULT;
+ sizeof(struct xt_counters) * num_counters) != 0) {
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
+ }
vfree(counters);
xt_table_unlock(t);
return ret;
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
- sizeof(struct xt_counters) * num_counters) != 0)
- ret = -EFAULT;
+ sizeof(struct xt_counters) * num_counters) != 0) {
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
+ }
vfree(counters);
xt_table_unlock(t);
return ret;
struct net *net = sock_net(sk);
gid_t group = current_egid();
gid_t range[2];
- struct group_info *group_info = get_current_groups();
- int i, j, count = group_info->ngroups;
+ struct group_info *group_info;
+ int i, j, count;
+ int ret = 0;
inet_get_ping_group_range_net(net, range, range+1);
if (range[0] <= group && group <= range[1])
return 0;
+ group_info = get_current_groups();
+ count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
for (j = 0; j < cp_count; j++) {
group = group_info->blocks[i][j];
if (range[0] <= group && group <= range[1])
- return 0;
+ goto out_release_group;
}
count -= cp_count;
}
- return -EACCES;
+ ret = -EACCES;
+
+out_release_group:
+ put_group_info(group_info);
+ return ret;
}
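The leak being fixed is an unbalanced reference: get_current_groups() takes a reference on the group_info that the early `return 0` paths never dropped. The pairing the rewrite enforces, sketched:

	struct group_info *gi = get_current_groups();	/* grabs a reference */

	/* ... walk gi->blocks[i][j] across gi->ngroups entries ... */

	put_group_info(gi);	/* drop it on every exit path */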
static void ping_close(struct sock *sk, long timeout)
struct in_device *out_dev;
unsigned int flags = 0;
__be32 spec_dst;
- u32 itag;
+ u32 itag = 0;
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ratio += cnt;
- ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
+ ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
}
/* Some calls are for duplicates without timetamps */
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
static atomic_t ipv6_fragmentation_id;
- int old, new;
+ int ident;
if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
return;
}
}
- do {
- old = atomic_read(&ipv6_fragmentation_id);
- new = old + 1;
- if (!new)
- new = 1;
- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
- fhdr->identification = htonl(new);
+ ident = atomic_inc_return(&ipv6_fragmentation_id);
+ fhdr->identification = htonl(ident);
}
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");
#ifdef IP6_TNL_DEBUG
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
- sizeof(struct xt_counters) * num_counters) != 0)
- ret = -EFAULT;
+ sizeof(struct xt_counters) * num_counters) != 0) {
+ /* Silent error, can't fail, new table is already in place */
+ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
+ }
vfree(counters);
xt_table_unlock(t);
return ret;
unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
- return mtu;
+ goto out;
mtu = IPV6_MIN_MTU;
mtu = idev->cnf.mtu6;
rcu_read_unlock();
- return mtu;
+out:
+ return min_t(unsigned int, mtu, IP6_MAX_MTU);
}
static struct dst_entry *icmp6_dst_gc_list;
module_init(sit_init);
module_exit(sit_cleanup);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("sit");
MODULE_ALIAS_NETDEV("sit0");
session->deref = pppol2tp_session_sock_put;
/* If PMTU discovery was enabled, use the MTU that was discovered */
- dst = sk_dst_get(sk);
+ dst = sk_dst_get(tunnel->sock);
if (dst != NULL) {
- u32 pmtu = dst_mtu(__sk_dst_get(sk));
+ u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
if (pmtu != 0)
session->mtu = session->mru = pmtu -
PPPOL2TP_HEADER_OVERHEAD;
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
- if (sdata->dev->reg_state == NETREG_REGISTERED)
- ret = (*format)(sdata, buf, sizeof(buf));
+ ret = (*format)(sdata, buf, sizeof(buf));
read_unlock(&dev_base_lock);
if (ret >= 0)
ret = -ENODEV;
rtnl_lock();
- if (sdata->dev->reg_state == NETREG_REGISTERED)
- ret = (*write)(sdata, buf, count);
+ ret = (*write)(sdata, buf, count);
rtnl_unlock();
freebuf:
sdata->u.ibss.privacy = params->privacy;
sdata->u.ibss.basic_rates = params->basic_rates;
+ sdata->u.ibss.last_scan_completed = jiffies;
memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
sizeof(params->mcast_rate));
out:
if (err) {
m->tcf_qstats.overlimits++;
- /* should we be asking for packet to be dropped?
- * may make sense for redirect case only
- */
- retval = TC_ACT_SHOT;
- } else {
+ if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
+ retval = TC_ACT_SHOT;
+ else
+ retval = m->tcf_action;
+ } else
retval = m->tcf_action;
- }
spin_unlock(&m->tcf_lock);
return retval;
/* Only real associations count against the endpoint, so
* don't bother for if this is a temporary association.
*/
- if (!asoc->temp) {
+ if (!list_empty(&asoc->asocs)) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening
continue;
if ((laddr->state == SCTP_ADDR_SRC) &&
(AF_INET == laddr->a.sa.sa_family)) {
- fl4->saddr = laddr->a.v4.sin_addr.s_addr;
fl4->fl4_sport = laddr->a.v4.sin_port;
+ flowi4_update_output(fl4,
+ asoc->base.sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(asoc->base.sk),
+ daddr->v4.sin_addr.s_addr,
+ laddr->a.v4.sin_addr.s_addr);
+
rt = ip_route_output_key(&init_net, fl4);
if (!IS_ERR(rt)) {
dst = &rt->dst;
if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
- strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
+ strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
+ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
return 1;
if (info->hdr->e_machine == EM_PPC64)
/* Special register function linked on all modules during final link of .ko */
if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
- strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
+ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
+ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
return 1;
/* Do not ignore this symbol */
return 0;
static int MIPS_is_fake_mcount(Elf_Rel const *rp)
{
- static Elf_Addr old_r_offset;
+ static Elf_Addr old_r_offset = ~(Elf_Addr)0;
Elf_Addr current_r_offset = _w(rp->r_offset);
int is_fake;
- is_fake = old_r_offset &&
+ is_fake = (old_r_offset != ~(Elf_Addr)0) &&
(current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
old_r_offset = current_r_offset;
* @xattr_value: pointer to the new extended attribute value
* @xattr_value_len: pointer to the new extended attribute value length
*
- * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
- * the current value is valid.
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid. As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing HMAC value. Writing 'security.evm' requires
+ * CAP_SYS_ADMIN privileges.
*/
int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+ if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+ && (xattr_data->type == EVM_XATTR_HMAC))
+ return -EPERM;
return evm_protect_xattr(dentry, xattr_name, xattr_value,
xattr_value_len);
}
{
struct snd_kcontrol *kctl;
+ /* Make sure that the ids assigned to the control do not wrap around */
+ if (card->last_numid >= UINT_MAX - count)
+ card->last_numid = 0;
+
list_for_each_entry(kctl, &card->controls, list) {
if (kctl->id.numid < card->last_numid + 1 + count &&
kctl->id.numid + kctl->count > card->last_numid + 1) {
{
struct snd_ctl_elem_id id;
unsigned int idx;
+ unsigned int count;
int err = -EINVAL;
if (! kcontrol)
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
+ if (id.index > UINT_MAX - kcontrol->count)
+ goto error;
+
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ count = kcontrol->count;
up_write(&card->controls_rwsem);
- for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
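The point of snapshotting `count` before up_write() is that once controls_rwsem is released, another thread may remove and free the kcontrol, so the notification loop must not dereference it again. The pattern, annotated (illustrative):

	down_write(&card->controls_rwsem);
	/* ... kcontrol is linked into the card and guaranteed alive ... */
	count = kcontrol->count;	/* copy while still protected */
	up_write(&card->controls_rwsem);
	/* from here on, only the local copies (count, id) are used */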
bool add_on_replace)
{
struct snd_ctl_elem_id id;
+ unsigned int count;
unsigned int idx;
struct snd_kcontrol *old;
int ret;
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+ count = kcontrol->count;
up_write(&card->controls_rwsem);
- for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
result = kctl->put(kctl, control);
}
if (result > 0) {
+ struct snd_ctl_elem_id id = control->id;
up_read(&card->controls_rwsem);
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
- &control->id);
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
return 0;
}
}
struct user_element {
struct snd_ctl_elem_info info;
+ struct snd_card *card;
void *elem_data; /* element data */
unsigned long elem_data_size; /* size of element data in bytes */
void *tlv_data; /* TLV data */
{
struct user_element *ue = kcontrol->private_data;
+ mutex_lock(&ue->card->user_ctl_lock);
memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+ mutex_unlock(&ue->card->user_ctl_lock);
return 0;
}
{
int change;
struct user_element *ue = kcontrol->private_data;
-
+
+ mutex_lock(&ue->card->user_ctl_lock);
change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
if (change)
memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+ mutex_unlock(&ue->card->user_ctl_lock);
return change;
}
new_data = memdup_user(tlv, size);
if (IS_ERR(new_data))
return PTR_ERR(new_data);
+ mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
change = memcmp(ue->tlv_data, new_data, size);
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
+ mutex_unlock(&ue->card->user_ctl_lock);
} else {
- if (! ue->tlv_data_size || ! ue->tlv_data)
- return -ENXIO;
- if (size < ue->tlv_data_size)
- return -ENOSPC;
+ int ret = 0;
+
+ mutex_lock(&ue->card->user_ctl_lock);
+ if (!ue->tlv_data_size || !ue->tlv_data) {
+ ret = -ENXIO;
+ goto err_unlock;
+ }
+ if (size < ue->tlv_data_size) {
+ ret = -ENOSPC;
+ goto err_unlock;
+ }
if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
- return -EFAULT;
+ ret = -EFAULT;
+err_unlock:
+ mutex_unlock(&ue->card->user_ctl_lock);
+ if (ret)
+ return ret;
}
return change;
}
struct user_element *ue;
int idx, err;
- if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
- return -ENOMEM;
if (info->count < 1)
return -EINVAL;
access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
info->id.numid = 0;
memset(&kctl, 0, sizeof(kctl));
- down_write(&card->controls_rwsem);
- _kctl = snd_ctl_find_id(card, &info->id);
- err = 0;
- if (_kctl) {
- if (replace)
- err = snd_ctl_remove(card, _kctl);
- else
- err = -EBUSY;
- } else {
- if (replace)
- err = -ENOENT;
+
+ if (replace) {
+ err = snd_ctl_remove_user_ctl(file, &info->id);
+ if (err)
+ return err;
}
- up_write(&card->controls_rwsem);
- if (err < 0)
- return err;
+
+ if (card->user_ctl_count >= MAX_USER_CONTROLS)
+ return -ENOMEM;
+
memcpy(&kctl.id, &info->id, sizeof(info->id));
kctl.count = info->owner ? info->owner : 1;
access |= SNDRV_CTL_ELEM_ACCESS_USER;
ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
if (ue == NULL)
return -ENOMEM;
+ ue->card = card;
ue->info = *info;
ue->info.access = 0;
ue->elem_data = (char *)ue + sizeof(*ue);
}
err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
if (err > 0) {
+ struct snd_ctl_elem_id id = kctl->id;
up_read(&card->controls_rwsem);
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
return 0;
}
} else {
INIT_LIST_HEAD(&card->devices);
init_rwsem(&card->controls_rwsem);
rwlock_init(&card->ctl_files_rwlock);
+ mutex_init(&card->user_ctl_lock);
INIT_LIST_HEAD(&card->controls);
INIT_LIST_HEAD(&card->ctl_files);
spin_lock_init(&card->files_lock);
ALC269_FIXUP_STEREO_DMIC,
ALC269_FIXUP_QUANTA_MUTE,
ALC269_FIXUP_LIFEBOOK,
+ ALC269_FIXUP_LIFEBOOK_EXTMIC,
ALC269_FIXUP_AMIC,
ALC269_FIXUP_DMIC,
ALC269VB_FIXUP_AMIC,
.chained = true,
.chain_id = ALC269_FIXUP_QUANTA_MUTE
},
+ [ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x19, 0x01a1903c }, /* headset mic, with jack detect */
+ { }
+ },
+ },
[ALC269_FIXUP_AMIC] = {
.type = ALC_FIXUP_PINS,
.v.pins = (const struct alc_pincfg[]) {
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
{ .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
{ .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
{ .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
+ { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
{ .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
{ .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
{ .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
spin_lock(&vcpu->async_pf.lock);
list_add_tail(&apf->link, &vcpu->async_pf.done);
apf->page = page;
- apf->done = true;
spin_unlock(&vcpu->async_pf.lock);
/*
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- mmdrop(mm);
+ mmput(mm);
kvm_put_kvm(vcpu->kvm);
}
struct kvm_async_pf *work =
list_entry(vcpu->async_pf.queue.next,
typeof(*work), queue);
- cancel_work_sync(&work->work);
list_del(&work->queue);
- if (!work->done) /* work was canceled */
+ if (cancel_work_sync(&work->work)) {
+ mmput(work->mm);
+ kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
+ }
}
spin_lock(&vcpu->async_pf.lock);
return 0;
work->page = NULL;
- work->done = false;
work->vcpu = vcpu;
work->gva = gva;
work->addr = gfn_to_hva(vcpu->kvm, gfn);
work->arch = *arch;
work->mm = current->mm;
- atomic_inc(&work->mm->mm_count);
+ atomic_inc(&work->mm->mm_users);
kvm_get_kvm(work->vcpu->kvm);
/* this can't really happen otherwise gfn_to_pfn_async
return 1;
retry_sync:
kvm_put_kvm(work->vcpu->kvm);
- mmdrop(work->mm);
+ mmput(work->mm);
kmem_cache_free(async_pf_cache, work);
return 0;
}
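The mmdrop()/mmput() switch is about which refcount is held: mm_count (released by mmdrop) only keeps the mm_struct allocation alive, while mm_users (released by mmput) keeps the address space itself usable, which the async page fault worker needs while it resolves the fault. The pairing, sketched:

	atomic_inc(&mm->mm_users);	/* queue time: pin the address space */
	/* ... async worker calls gfn_to_pfn_async() against mm ... */
	mmput(mm);			/* completion or cancellation */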