spia_pedr=
spia_peddr=
+ stack_guard_gap= [MM]
+ override the default stack gap protection. The value
+ is in page units and defines how many pages before
+ (for stacks growing down) or after (for stacks growing
+ up) the main stack are reserved for no other mapping.
+ The default value is 256 pages.
+
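Many of the hunks below convert vma->vm_start / vma->vm_end checks into vm_start_gap() / vm_end_gap() calls. As a rough sketch of the semantics these helpers are expected to have (the exact include/linux/mm.h implementation may differ), they extend a stack VMA by the configurable guard gap, clamping on wraparound:

    /* Sketch, assuming a global stack_guard_gap in bytes: */
    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
    	unsigned long vm_start = vma->vm_start;

    	if (vma->vm_flags & VM_GROWSDOWN) {
    		vm_start -= stack_guard_gap;
    		if (vm_start > vma->vm_start)	/* underflow */
    			vm_start = 0;
    	}
    	return vm_start;
    }

    static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
    {
    	unsigned long vm_end = vma->vm_end;

    	if (vma->vm_flags & VM_GROWSUP) {
    		vm_end += stack_guard_gap;
    		if (vm_end < vma->vm_end)	/* overflow */
    			vm_end = -PAGE_SIZE;
    	}
    	return vm_end;
    }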
stacktrace [FTRACE]
Enable the stack tracer on boot up.
VERSION = 3
PATCHLEVEL = 2
-SUBLEVEL = 84
+SUBLEVEL = 91
EXTRAVERSION =
NAME = Saber-toothed Squirrel
/* At this point: (!vma || addr < vma->vm_end). */
if (limit - len < addr)
return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
addr = vma->vm_end;
vma = vma->vm_next;
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
return 0;
}
+ /*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+ if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) {
+ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+ return 0;
+ }
+
/* Determine how many BRPs/WRPs are available. */
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
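For context: read_cpuid_id() returns the MIDR, and the 0xff00fff0 mask keeps the implementer field (bits 31:24, 0x51 = Qualcomm) plus the primary part number (bits 15:4) while discarding variant and revision, so every Scorpion stepping matches. A minimal sketch of the check (the macro name here is illustrative, not a kernel identifier):

    #define MIDR_IMPL_PART_MASK	0xff00fff0	/* hypothetical name */

    static inline bool cpu_is_scorpion(u32 midr)
    {
    	/* compare implementer + part number, ignore variant/revision */
    	return (midr & MIDR_IMPL_PART_MASK) == ARM_CPU_PART_SCORPION;
    }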
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs newregs;
+ struct pt_regs newregs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (len > mm->cached_hole_size) {
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || addr + len <= vm_start) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = vma->vm_end;
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
asflags-y += $(LINUXINCLUDE)
ccflags-y += -O2 $(LINUXINCLUDE)
+
+ifdef CONFIG_ETRAX_AXISFLASHMAP
+
arch-$(CONFIG_ETRAX_ARCH_V10) = v10
arch-$(CONFIG_ETRAX_ARCH_V32) = v32
$(call if_changed,objcopy)
cp -p $(obj)/rescue.bin $(objtree)
+else
+$(obj)/rescue.bin:
+
+endif
+
$(obj)/testrescue.bin: $(obj)/testrescue.o
$(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
# Pad it to 784 bytes
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
for (; vma; vma = vma->vm_next) {
if (addr > limit)
break;
- if (addr + len <= vma->vm_start)
+ if (addr + len <= vm_start_gap(vma))
goto success;
addr = vma->vm_end;
}
for (; vma; vma = vma->vm_next) {
if (addr > limit)
break;
- if (addr + len <= vma->vm_start)
+ if (addr + len <= vm_start_gap(vma))
goto success;
addr = vma->vm_end;
}
long map_shared = (flags & MAP_SHARED);
unsigned long start_addr, align_mask = PAGE_SIZE - 1;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
+ unsigned long prev_end;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
full_search:
start_addr = addr = (addr + align_mask) & ~align_mask;
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ for (vma = find_vma_prev(mm, addr, &prev); ; prev = vma,
+ vma = vma->vm_next) {
+ if (prev) {
+ prev_end = vm_end_gap(prev);
+ if (addr < prev_end) {
+ addr = (prev_end + align_mask) & ~align_mask;
+ /* If vma already violates gap, forget it */
+ if (vma && addr > vma->vm_start)
+ addr = vma->vm_start;
+ }
+ }
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
if (start_addr != TASK_UNMAPPED_BASE) {
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (!vma || addr + len <= vm_start_gap(vma)) {
/* Remember the address where we stopped this search: */
mm->free_area_cache = addr + len;
return addr;
}
- addr = (vma->vm_end + align_mask) & ~align_mask;
}
}
/* At this point: (!vmm || addr < vmm->vm_end). */
if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
return -ENOMEM;
- if (!vmm || (addr + len) <= vmm->vm_start)
+ if (!vmm || (addr + len) <= vm_start_gap(vmm))
return addr;
- addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ addr = ALIGN(vm_end_gap(vmm), HPAGE_SIZE);
}
}
archprepare:
ifdef CONFIG_MIPS32_N32
- @echo ' Checking missing-syscalls for N32'
+ @$(kecho) ' Checking missing-syscalls for N32'
$(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=n32"
endif
ifdef CONFIG_MIPS32_O32
- @echo ' Checking missing-syscalls for O32'
+ @$(kecho) ' Checking missing-syscalls for O32'
$(Q)$(MAKE) $(build)=. missing-syscalls missing_syscalls_flags="-mabi=32"
endif
ADD src, src, 16*NBYTES
EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
ADD dst, dst, 16*NBYTES
-EXC( LOAD t0, UNIT(-8)(src), l_exc_copy)
-EXC( LOAD t1, UNIT(-7)(src), l_exc_copy)
-EXC( LOAD t2, UNIT(-6)(src), l_exc_copy)
-EXC( LOAD t3, UNIT(-5)(src), l_exc_copy)
+EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
+EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
+EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
+EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
-EXC( LOAD t0, UNIT(-4)(src), l_exc_copy)
-EXC( LOAD t1, UNIT(-3)(src), l_exc_copy)
-EXC( LOAD t2, UNIT(-2)(src), l_exc_copy)
-EXC( LOAD t3, UNIT(-1)(src), l_exc_copy)
+EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
+EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
+EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
+EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
nop
END(memcpy)
+l_exc_copy_rewind16:
+ /* Rewind src and dst by 16*NBYTES for l_exc_copy */
+ SUB src, src, 16*NBYTES
+ SUB dst, dst, 16*NBYTES
l_exc_copy:
/*
* Copy bytes from src until faulting load address (or until a
# CONFIG_MLX4_DEBUG is not set
CONFIG_TEHUTI=m
CONFIG_BNX2X=m
-CONFIG_QLGE=m
CONFIG_SFC=m
CONFIG_BE2NET=m
CONFIG_LIBERTAS_THINFIRM=m
" daddu %0, %4 \n"
" dsll32 $1, %0, 0 \n"
" daddu %0, $1 \n"
+ " sltu $1, %0, $1 \n"
" dsra32 %0, %0, 0 \n"
+ " addu %0, $1 \n"
#endif
" .set pop"
: "=r" (sum)
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
- struct thread_info *ti = task_thread_info(p);
- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
- struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
#endif
for (reg = 0; reg < 16; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = 0;
/* S0 - S7 */
- for (reg = 16; reg < 24; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = p->thread.reg16;
+ *(ptr++) = p->thread.reg17;
+ *(ptr++) = p->thread.reg18;
+ *(ptr++) = p->thread.reg19;
+ *(ptr++) = p->thread.reg20;
+ *(ptr++) = p->thread.reg21;
+ *(ptr++) = p->thread.reg22;
+ *(ptr++) = p->thread.reg23;
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
- for (reg = 28; reg < 32; reg++)
- *(ptr++) = regs->regs[reg];
-
- *(ptr++) = regs->cp0_status;
- *(ptr++) = regs->lo;
- *(ptr++) = regs->hi;
- *(ptr++) = regs->cp0_badvaddr;
- *(ptr++) = regs->cp0_cause;
- *(ptr++) = regs->cp0_epc;
+ *(ptr++) = (long)p;
+ *(ptr++) = p->thread.reg29;
+ *(ptr++) = p->thread.reg30;
+ *(ptr++) = p->thread.reg31;
+
+ *(ptr++) = p->thread.cp0_status;
+
+ /* lo, hi */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * BadVAddr, Cause
+ * Ideally these would come from the last exception frame up the stack,
+ * but that requires unwinding; without it we can't know much for sure.
+ */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * PC
+ * use return address (RA), i.e. the moment after return from resume()
+ */
+ *(ptr++) = p->thread.reg31;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr = addr0;
+ unsigned long vm_start;
int do_color_align;
if (unlikely(len > TASK_SIZE))
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
addr = vma->vm_end;
if (do_color_align)
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr - len);
- if (!vma || addr <= vma->vm_start) {
+ if (!vma || addr <= vm_start_gap(vma)) {
/* cache the address as a hint for next time */
return mm->free_area_cache = addr - len;
}
* return with success:
*/
vma = find_vma(mm, addr);
- if (likely(!vma || addr + len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/* cache the address as a hint for next time */
return mm->free_area_cache = addr;
}
/* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start - len;
+ addr = vm_start - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- } while (likely(len < vma->vm_start));
+ } while (likely(len < vm_start));
bottomup:
/*
#endif
#include <linux/compiler.h>
-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
* to include/asm-i386/bitops.h or kerneldoc
*/
+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
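SHIFT_PER_LONG is simply log2(BITS_PER_LONG) and pairs with CHOP_SHIFTCOUNT() to split a bit number into a word index and an in-word offset. A sketch of the usual usage (the helper names are illustrative):

    static inline unsigned long bit_to_word(unsigned long nr)
    {
    	return nr >> SHIFT_PER_LONG;	/* which long in the bitmap */
    }

    static inline unsigned long bit_in_word(unsigned long nr)
    {
    	return CHOP_SHIFTCOUNT(nr);	/* nr % BITS_PER_LONG */
    }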
*/
#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
#define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
#endif
#include <asm-generic/bitsperlong.h>
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H
+#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>
}
#define __arch_swab32 __arch_swab32
-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
return x;
}
#define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */
#endif /* _PARISC_SWAB_H */
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
+ unsigned long prev_end;
addr = PAGE_ALIGN(addr);
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ for (vma = find_vma_prev(current->mm, addr, &prev); ; prev = vma,
+ vma = vma->vm_next) {
+ if (prev) {
+ prev_end = vm_end_gap(prev);
+ if (addr < prev_end) {
+ addr = prev_end;
+ /* If vma already violates gap, forget it */
+ if (vma && addr > vma->vm_start)
+ addr = vma->vm_start;
+ }
+ }
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
- addr = vma->vm_end;
}
}
static unsigned long get_shared_area(struct address_space *mapping,
unsigned long addr, unsigned long len, unsigned long pgoff)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
+ unsigned long prev_end;
int offset = mapping ? get_offset(mapping) : 0;
offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
addr = DCACHE_ALIGN(addr - offset) + offset;
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ for (vma = find_vma_prev(current->mm, addr, &prev); ; prev = vma,
+ vma = vma->vm_next) {
+ if (prev) {
+ prev_end = vm_end_gap(prev);
+ if (addr < prev_end) {
+ addr = DCACHE_ALIGN(prev_end - offset) + offset;
+ if (addr < prev_end) /* handle wraparound */
+ return -ENOMEM;
+ /* If vma already violates gap, forget it */
+ if (vma && addr > vma->vm_start)
+ addr = vma->vm_start;
+ }
+ }
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
- addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
- if (addr < vma->vm_end) /* handle wraparound */
- return -ENOMEM;
}
}
mtsp %r0,%sr4 /* get kernel space into sr4 */
mtsp %r0,%sr5 /* get kernel space into sr5 */
mtsp %r0,%sr6 /* get kernel space into sr6 */
- mfsp %sr7,%r1 /* save user sr7 */
- mtsp %r1,%sr3 /* and store it in sr3 */
#ifdef CONFIG_64BIT
/* for now we can *always* set the W bit on entry to the syscall
depdi 0, 31, 32, %r21
1:
#endif
+
+ /* We use an rsm/ssm pair to prevent sr3 from being clobbered
+ * by external interrupts.
+ */
+ mfsp %sr7,%r1 /* save user sr7 */
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
mfctl %cr30,%r1
xor %r1,%r30,%r30 /* ye olde xor trick */
xor %r1,%r30,%r1
*/
mtsp %r0,%sr7 /* get kernel space into sr7 */
+ ssm PSW_SM_I, %r0 /* enable interrupts */
STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
mfctl %cr30,%r1 /* get task ptr in %r1 */
LDREG TI_TASK(%r1),%r1
bctr
1:
- /* Save the value at addr zero for a null pointer write check later. */
-
- li r4, 0
- lwz r3, 0(r4)
-
/* Primary delays then goes to _zimage_start in wrapper. */
or 31, 31, 31 /* db16cyc */
flush_cache((void *)0x100, 512);
}
-void platform_init(unsigned long null_check)
+void platform_init(void)
{
const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
void *chosen;
unsigned long ft_addr;
u64 rm_size;
- unsigned long val;
console_ops.write = ps3_console_write;
platform_ops.exit = ps3_exit;
printf(" flat tree at 0x%lx\n\r", ft_addr);
- val = *(unsigned long *)0;
-
- if (val != null_check)
- printf("null check failed: %lx != %lx\n\r", val, null_check);
-
((kernel_entry_t)0)(ft_addr, 0, NULL);
ps3_exit();
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch ((instruction >> 1) & 0x3ff) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}
/* Byteswap little endian loads and stores */
rcu_read_lock();
bp = __get_cpu_var(bp_per_reg);
- if (!bp)
+ if (!bp) {
+ rc = NOTIFY_DONE;
goto out;
+ }
info = counter_arch_bp(bp);
/*
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
+ struct device *dev;
int ret = 0;
root = of_find_node_by_path("/");
if (!of_match_node(matches, child))
continue;
- if (bus_find_device(&ibmebus_bus_type, NULL, child,
- ibmebus_match_node))
+ dev = bus_find_device(&ibmebus_bus_type, NULL, child,
+ ibmebus_match_node);
+ if (dev) {
+ put_device(dev);
continue;
+ }
ret = ibmebus_create_device(child);
if (ret) {
const char *buf, size_t count)
{
struct device_node *dn = NULL;
+ struct device *dev;
char *path;
ssize_t rc = 0;
if (!path)
return -ENOMEM;
- if (bus_find_device(&ibmebus_bus_type, NULL, path,
- ibmebus_match_path)) {
+ dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+ ibmebus_match_path);
+ if (dev) {
+ put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
+ put_device(dev);
kfree(path);
return count;
std r0,0(r1)
ptesync
ld r0,0(r1)
-1: cmp cr0,r0,r0
+1: cmpd cr0,r0,r0
bne 1b
PPC_NAP
b .
/* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
- strncpy(part->header.name, "wwwwwwwwwwww", 12);
+ memset(part->header.name, 'w', 12);
part->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
if (rc <= 0) {
}
if (prev) {
prev->header.length += part->header.length;
- prev->header.checksum = nvram_checksum(&part->header);
- rc = nvram_write_header(part);
+ prev->header.checksum = nvram_checksum(&prev->header);
+ rc = nvram_write_header(prev);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
bl V_LOCAL_FUNC(__get_datapage)
mtlr r12
addi r3,r3,CFG_SYSCALL_MAP64
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
li r0,__NR_syscalls
bne cr0,99f
li r3,0
- cmpli cr0,r4,0
+ cmpldi cr0,r4,0
crclr cr0*4+so
beqlr
lis r5,CLOCK_REALTIME_RES@h
addi r3,r3,8
171:
177:
+179:
addi r3,r3,8
370:
372:
173:
174:
175:
-179:
181:
184:
186:
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
int psize, int use_cache)
{
struct vm_area_struct *vma;
- unsigned long start_addr, addr;
+ unsigned long start_addr, addr, vm_start;
struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
continue;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || addr + len <= vm_start) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (use_cache && (addr + mm->cached_hole_size) < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = vma->vm_end;
}
int psize, int use_cache)
{
struct vm_area_struct *vma;
- unsigned long addr;
+ unsigned long addr, vm_start;
struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
* return with success:
*/
vma = find_vma(mm, addr);
- if (!vma || (addr + len) <= vma->vm_start) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || (addr + len) <= vm_start) {
/* remember the address as a hint for next time */
if (use_cache)
mm->free_area_cache = addr;
}
/* remember the largest hole we saw so far */
- if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (use_cache && (addr + mm->cached_hole_size) < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start;
+ addr = vm_start;
}
/*
unsigned long decompress_kernel(void)
{
- unsigned long output_addr;
- unsigned char *output;
+ void *output, *kernel_end;
- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
- memset(&_bss, 0, &_ebss - &_bss);
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+ kernel_end = output + SZ__bss_start;
+ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
- * kernel image.
+ * kernel image. This also prevents initrd corruption caused by
+ * bss clearing since kernel_end will always be located behind the
+ * current bss section.
*/
- if (INITRD_START && INITRD_SIZE &&
- INITRD_START < (unsigned long) output + SZ__bss_start) {
- check_ipl_parmblock(output + SZ__bss_start,
- INITRD_START + INITRD_SIZE);
- memmove(output + SZ__bss_start,
- (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) output + SZ__bss_start;
+ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+ check_ipl_parmblock(kernel_end, INITRD_SIZE);
+ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) kernel_end;
}
#endif
+ /*
+ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+ * initialized afterwards since they reside in bss.
+ */
+ memset(&_bss, 0, &_ebss - &_bss);
+ free_mem_ptr = (unsigned long) &_end;
+ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
puts("Uncompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
puts("Ok, booting the kernel.\n");
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
int do_colour_align;
if (flags & MAP_FIXED) {
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
}
return -ENOMEM;
}
- if (likely(!vma || addr + len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = vma->vm_end;
if (do_colour_align)
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
+ unsigned long vm_start;
int do_colour_align;
if (flags & MAP_FIXED) {
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start) {
+ if (!vma || addr <= vm_start_gap(vma)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
* return with success:
*/
vma = find_vma(mm, addr);
- if (likely(!vma || addr+len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
/* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
+ addr = vm_start-len;
if (do_colour_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- } while (likely(len < vma->vm_start));
+ } while (likely(len < vm_start));
bottomup:
/*
/* Find GPTIMER Timer Registers base address otherwise bail out. */
nnp = rootnp;
- do {
- np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
- if (!np) {
- np = of_find_node_by_name(nnp, "01_011");
- if (!np)
- goto bad;
- }
- ampopts = 0;
- pp = of_find_property(np, "ampopts", &len);
- if (pp) {
- ampopts = *(int *)pp->value;
- if (ampopts == 0) {
- /* Skip this instance, resource already
- * allocated by other OS */
- nnp = np;
- continue;
- }
+retry:
+ np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
+ if (!np) {
+ np = of_find_node_by_name(nnp, "01_011");
+ if (!np)
+ goto bad;
+ }
+
+ ampopts = 0;
+ pp = of_find_property(np, "ampopts", &len);
+ if (pp) {
+ ampopts = *(int *)pp->value;
+ if (ampopts == 0) {
+ /* Skip this instance, resource already
+ * allocated by other OS */
+ nnp = np;
+ goto retry;
}
+ }
+
+ /* Select Timer-Instance on Timer Core. Default is zero */
+ leon3_gptimer_idx = ampopts & 0x7;
- /* Select Timer-Instance on Timer Core. Default is zero */
- leon3_gptimer_idx = ampopts & 0x7;
-
- pp = of_find_property(np, "reg", &len);
- if (pp)
- leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
- pp->value;
- pp = of_find_property(np, "interrupts", &len);
- if (pp)
- leon3_gptimer_irq = *(unsigned int *)pp->value;
- } while (0);
+ pp = of_find_property(np, "reg", &len);
+ if (pp)
+ leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
+ pp->value;
+ pp = of_find_property(np, "interrupts", &len);
+ if (pp)
+ leon3_gptimer_irq = *(unsigned int *)pp->value;
if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
goto bad;
}
if (TASK_SIZE - PAGE_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
int do_color_align;
if (flags & MAP_FIXED) {
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
}
return -ENOMEM;
}
- if (likely(!vma || addr + len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = vma->vm_end;
if (do_color_align)
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long task_size = STACK_TOP32;
- unsigned long addr = addr0;
+ unsigned long addr = addr0, vm_start;
int do_color_align;
/* This should only ever run for 32-bit processes. */
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start) {
+ if (!vma || addr <= vm_start_gap(vma)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
* return with success:
*/
vma = find_vma(mm, addr);
- if (likely(!vma || addr+len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
/* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
+ addr = vm_start - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
- } while (likely(len < vma->vm_start));
+ } while (likely(len < vm_start));
bottomup:
/*
struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
if (test_thread_flag(TIF_32BIT))
task_size = STACK_TOP32;
}
return -ENOMEM;
}
- if (likely(!vma || addr + len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = ALIGN(vma->vm_end, HPAGE_SIZE);
}
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
+ unsigned long vm_start;
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start) {
+ if (!vma || addr <= vm_start_gap(vma)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
* return with success:
*/
vma = find_vma(mm, addr);
- if (likely(!vma || addr+len <= vma->vm_start)) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (likely(!vma || addr + len <= vm_start)) {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
}
/* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = (vma->vm_start-len) & HPAGE_MASK;
- } while (likely(len < vma->vm_start));
+ addr = (vm_start - len) & HPAGE_MASK;
+ } while (likely(len < vm_start));
bottomup:
/*
addr = ALIGN(addr, HPAGE_SIZE);
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
if (len > mm->cached_hole_size) {
start_addr = mm->free_area_cache;
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || addr + len <= vm_start) {
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = ALIGN(vma->vm_end, huge_page_size(h));
}
}
struct vm_area_struct *vma, *prev_vma;
unsigned long base = mm->mmap_base, addr = addr0;
unsigned long largest_hole = mm->cached_hole_size;
+ unsigned long vm_start;
int first_time = 1;
/* don't allow allocations above current base */
/*
* new region fits between prev_vma->vm_end and
- * vma->vm_start, use it:
+ * vm_start, use it:
*/
- if (addr + len <= vma->vm_start &&
+ vm_start = vm_start_gap(vma);
+ if (addr + len <= vm_start &&
(!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
mm->cached_hole_size = largest_hole;
}
/* remember the largest hole we saw so far */
- if (addr + largest_hole < vma->vm_start)
- largest_hole = vma->vm_start - addr;
+ if (addr + largest_hole < vm_start)
+ largest_hole = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = (vma->vm_start - len) & huge_page_mask(h);
+ addr = (vm_start - len) & huge_page_mask(h);
- } while (len <= vma->vm_start);
+ } while (len <= vm_start);
fail:
/*
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
#define AT_SYSINFO 32
-#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
+#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(VDSO_CURRENT_BASE)
#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
- __get_user_asm_ex(x, ptr, "q", "", "=r")
+ __get_user_asm_ex(x, ptr, "q", "", "=&r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+ __get_user_asm_ex(x, ptr, "b", "b", "=&q"); \
break; \
case 2: \
- __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+ __get_user_asm_ex(x, ptr, "w", "w", "=&r"); \
break; \
case 4: \
- __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+ __get_user_asm_ex(x, ptr, "l", "k", "=&r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
- : ltype(x) : "m" (__m(addr)))
+ : ltype(x) : "m" (__m(addr)), "0" (0))
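The constraint change is subtle: '&' marks the output register as early-clobber, so the compiler may not assign it to any other input operand, while the new "0" (0) input pre-loads that same register with zero. Together they guarantee that if the load at label 1 faults and the exception table resumes execution at label 2, the destination reads as zero rather than stale register contents. An illustrative (non-kernel) version of the pattern:

    static inline int load_or_zero(const int *p)
    {
    	int val;

    	asm ("1:	movl %1, %0\n"
    	     "2:\n"
    	     /* the kernel adds an extable entry mapping a fault at 1b to 2b */
    	     : "=&r" (val)
    	     : "m" (*p), "0" (0));
    	return val;
    }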
#define __put_user_nocheck(x, ptr, size) \
({ \
{
int bit;
- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
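get_option() parses a signed integer, so something like clearcpuid=-1 previously passed the upper-bound test and indexed backwards into the capability bitmap; the added bit >= 0 check closes that hole. A minimal sketch of the validation (illustrative helper):

    static int parse_cpuid_bit(char *arg)
    {
    	int bit;

    	if (!get_option(&arg, &bit))
    		return -EINVAL;
    	if (bit < 0 || bit >= NCAPINTS * 32)
    		return -ERANGE;	/* reject negative and out-of-range bits */
    	return bit;
    }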
unsigned long return_hooker = (unsigned long)
&return_to_handler;
+ /*
+ * When resuming from suspend-to-ram, this function can be indirectly
+ * called from early CPU startup code while the CPU is in real mode,
+ * which would fail miserably. Make sure the stack pointer is a
+ * virtual address.
+ *
+ * This check isn't as accurate as virt_addr_valid(), but it should be
+ * good enough for this purpose, and it's fast.
+ */
+ if (unlikely((long)__builtin_frame_address(0) >= 0))
+ return;
+
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
return;
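The sign test works because kernel virtual addresses sit in the top half of the address space (above PAGE_OFFSET on 32-bit, in the upper canonical half on 64-bit), so the top bit of a kernel stack address is set, while a real-mode (low, physical) stack pointer tests non-negative. Reduced to a helper, the check is just:

    static inline bool stack_is_virtual(void)
    {
    	/* negative when the frame address is a kernel virtual address */
    	return (long)__builtin_frame_address(0) < 0;
    }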
/* were we called with bad_dma_address? */
badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
- if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
+ if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
return;
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
unsigned long begin, end;
if (flags & MAP_FIXED)
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || addr + len <= vm_start) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = vma->vm_end;
addr = align_addr(addr, filp, 0);
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
+ unsigned long vm_start;
/* requested length too big for entire address space */
if (len > TASK_SIZE)
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
ALIGN_TOPDOWN);
vma = find_vma(mm, tmp_addr);
- if (!vma || tmp_addr + len <= vma->vm_start)
+ if (!vma || tmp_addr + len <= vm_start_gap(vma))
/* remember the address as a hint for next time */
return mm->free_area_cache = tmp_addr;
}
* return with success:
*/
vma = find_vma(mm, addr);
- if (!vma || addr+len <= vma->vm_start)
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || addr + len <= vm_start)
/* remember the address as a hint for next time */
return mm->free_area_cache = addr;
/* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (len < vma->vm_start);
+ addr = vm_start - len;
+ } while (len < vm_start);
bottomup:
/*
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
- unsigned short sel, old_sel;
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
+ unsigned short sel;
+ struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
- /* Assignment of RIP may only fail in 64-bit mode */
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
- VCPU_SREG_CS);
-
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- /* assigning eip failed; restore the old cs */
- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
- return rc;
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
{
int rc;
unsigned long eip, cs;
- u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
- VCPU_SREG_CS);
+ struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, new_desc.l);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
return 0;
}
- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+ if (is_nmi(intr_info))
return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
- if (!is_exception(intr_info))
+ if (is_nmi(intr_info))
return 0;
else if (is_page_fault(intr_info))
return enable_ept;
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
- (exit_intr_info & INTR_INFO_VALID_MASK)) {
+ if (is_nmi(exit_intr_info)) {
kvm_before_handle_nmi(&vmx->vcpu);
asm("int $2");
kvm_after_handle_nmi(&vmx->vcpu);
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
+ unsigned long flags;
+ /*
+ * Disabling irqs at this point since the following code could be
+ * interrupted and executed through kvm_arch_hardware_disable()
+ */
+ local_irq_save(flags);
+ if (locals->registered) {
+ locals->registered = false;
+ user_return_notifier_unregister(urn);
+ }
+ local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
values->curr = values->host;
}
}
- locals->registered = false;
- user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
- MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
- MSR_TSC_AUX,
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};
static unsigned num_msrs_to_save;
for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
continue;
-
- /*
- * Even MSRs that are valid in the host may not be exposed
- * to the guests in some cases.
- */
- switch (msrs_to_save[i]) {
- case MSR_TSC_AUX:
- if (!kvm_x86_ops->rdtscp_supported())
- continue;
- break;
- default:
- break;
- }
-
if (j < i)
msrs_to_save[j] = msrs_to_save[i];
j++;
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
kvmclock_reset(vcpu);
- free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
+ free_cpumask_var(wbinvd_dirty_mask);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long start_addr;
+ unsigned long start_addr, vm_start;
if (len > mm->cached_hole_size) {
start_addr = mm->free_area_cache;
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (vma)
+ vm_start = vm_start_gap(vma);
+ if (!vma || addr + len <= vm_start) {
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
addr = ALIGN(vma->vm_end, huge_page_size(h));
}
}
struct vm_area_struct *vma, *prev_vma;
unsigned long base = mm->mmap_base, addr = addr0;
unsigned long largest_hole = mm->cached_hole_size;
+ unsigned long vm_start;
int first_time = 1;
/* don't allow allocations above current base */
* new region fits between prev_vma->vm_end and
* vma->vm_start, use it:
*/
- if (addr + len <= vma->vm_start &&
+ vm_start = vm_start_gap(vma);
+ if (addr + len <= vm_start &&
(!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
mm->cached_hole_size = largest_hole;
}
/* remember the largest hole we saw so far */
- if (addr + largest_hole < vma->vm_start)
- largest_hole = vma->vm_start - addr;
+ if (addr + largest_hole < vm_start)
+ largest_hole = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = (vma->vm_start - len) & huge_page_mask(h);
- } while (len <= vma->vm_start);
+ addr = (vm_start - len) & huge_page_mask(h);
+ } while (len <= vm_start);
fail:
/*
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr <= 256)
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ if (page_is_ram(pagenr)) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+ */
+ if (pagenr < 256)
+ return 2;
+
+ return 0;
+ }
+
+ /*
+ * This must follow RAM test, since System RAM is considered a
+ * restricted resource under CONFIG_STRICT_IOMEM.
+ */
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+ /* Low 1MB bypasses iomem restrictions. */
+ if (pagenr < 256)
+ return 1;
+
return 0;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
+ }
+
+ return 1;
}
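The function is now tri-state: 0 rejects the page, 1 allows it, and 2 allows the mapping but asks the caller to present the page as zeroes. A sketch of how a /dev/mem-style read path might honor that (illustrative, not the exact drivers/char/mem.c code; copy_page_to_user() is a hypothetical helper):

    ssize_t read_page(unsigned long pagenr, char __user *buf, size_t sz)
    {
    	switch (devmem_is_allowed(pagenr)) {
    	case 0:
    		return -EPERM;
    	case 2:		/* allowed, but show zeroes */
    		return clear_user(buf, sz) ? -EFAULT : sz;
    	default:
    		return copy_page_to_user(buf, pagenr, sz);
    	}
    }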
void free_init_pages(char *what, unsigned long begin, unsigned long end)
return 1;
list_for_each_entry(msidesc, &dev->msi_list, list) {
- __read_msi_msg(msidesc, &msg);
- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (msg.data != XEN_PIRQ_MSI_DATA ||
- xen_irq_from_pirq(pirq) < 0) {
- pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0) {
- irq = -ENODEV;
- goto error;
- }
- xen_msi_compose_msg(dev, pirq, &msg);
- __write_msi_msg(msidesc, &msg);
- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- } else {
- dev_dbg(&dev->dev,
- "xen: msi already bound to pirq=%d\n", pirq);
+ pirq = xen_allocate_pirq_msi(dev, msidesc);
+ if (pirq < 0) {
+ irq = -ENODEV;
+ goto error;
}
+ xen_msi_compose_msg(dev, pirq, &msg);
+ __write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
(type == PCI_CAP_ID_MSIX) ?
"msi-x" : "msi",
dprintk("%s: write %Zd bytes\n", bd->name, count);
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
bsg_set_block(bd, file);
bytes_written = 0;
crypto_completion_t complete;
void *data;
u8 *result;
+ u32 flags;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
- struct ahash_request_priv *priv = req->priv;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ unsigned int ds = crypto_ahash_digestsize(tfm);
+ struct ahash_request_priv *priv;
- if (err == -EINPROGRESS)
- return;
+ priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * WARNING: Voodoo programming below!
+ *
+ * The code below is obscure and hard to understand, so an explanation
+ * is necessary. See include/crypto/hash.h and include/linux/crypto.h
+ * to understand the layout of structures used here!
+ *
+ * The code here will replace portions of the ORIGINAL request with
+ * pointers to new code and buffers so the hashing operation can store
+ * the result in aligned buffer. We will call the modified request
+ * an ADJUSTED request.
+ *
+ * The newly mangled request will look as such:
+ *
+ * req {
+ * .result = ADJUSTED[new aligned buffer]
+ * .base.complete = ADJUSTED[pointer to completion function]
+ * .base.data = ADJUSTED[*req (pointer to self)]
+ * .priv = ADJUSTED[new priv] {
+ * .result = ORIGINAL(result)
+ * .complete = ORIGINAL(base.complete)
+ * .data = ORIGINAL(base.data)
+ * }
+ */
+
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
+ priv->flags = req->base.flags;
+
+ /*
+ * WARNING: We do not back up req->priv here! The req->priv
+ * is for internal use of the Crypto API and the
+ * user must _NOT_ _EVER_ depend on its content!
+ */
+
+ req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
+ req->base.complete = cplt;
+ req->base.data = req;
+ req->priv = priv;
+
+ return 0;
+}
+
+static void ahash_restore_req(struct ahash_request *req, int err)
+{
+ struct ahash_request_priv *priv = req->priv;
if (!err)
memcpy(priv->result, req->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+ /* Restore the original crypto request. */
+ req->result = priv->result;
+
+ ahash_request_set_callback(req, priv->flags,
+ priv->complete, priv->data);
+ req->priv = NULL;
+
+ /* Free the priv allocated for the ADJUSTED request. */
kzfree(priv);
}
+static void ahash_notify_einprogress(struct ahash_request *req)
+{
+ struct ahash_request_priv *priv = req->priv;
+ struct crypto_async_request oreq;
+
+ oreq.data = priv->data;
+
+ priv->complete(&oreq, -EINPROGRESS);
+}
+
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- struct ahash_request_priv *priv = areq->priv;
- crypto_completion_t complete = priv->complete;
- void *data = priv->data;
- ahash_op_unaligned_finish(areq, err);
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
+ /*
+ * Restore the original request; see ahash_op_unaligned() for what
+ * goes where.
+ *
+ * The "struct ahash_request *req" here is in fact the "req.base"
+ * from the ADJUSTED request from ahash_op_unaligned(), thus as it
+ * is a pointer to self, it is also the ADJUSTED "req".
+ */
- complete(data, err);
+ /* First copy req->result into req->priv.result */
+ ahash_restore_req(areq, err);
+
+ /* Complete the ORIGINAL request. */
+ areq->base.complete(&areq->base, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
int (*op)(struct ahash_request *))
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
int err;
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
- return -ENOMEM;
-
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
-
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = ahash_op_unaligned_done;
- req->base.data = req;
- req->priv = priv;
+ err = ahash_save_req(req, ahash_op_unaligned_done);
+ if (err)
+ return err;
err = op(req);
- ahash_op_unaligned_finish(req, err);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
+ ahash_restore_req(req, err);
return err;
}
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
- struct ahash_request_priv *priv = req->priv;
+ struct ahash_request *areq = req->data;
if (err == -EINPROGRESS)
return;
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
- kzfree(priv);
-}
+ ahash_restore_req(areq, err);
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
- struct ahash_request *areq = req->data;
- struct ahash_request_priv *priv = areq->priv;
- crypto_completion_t complete = priv->complete;
- void *data = priv->data;
-
- ahash_def_finup_finish2(areq, err);
-
- complete(data, err);
+ areq->base.complete(&areq->base, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
goto out;
req->base.complete = ahash_def_finup_done2;
- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = crypto_ahash_reqtfm(req)->final(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
out:
- ahash_def_finup_finish2(req, err);
+ ahash_restore_req(req, err);
return err;
}
static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- struct ahash_request_priv *priv = areq->priv;
- crypto_completion_t complete = priv->complete;
- void *data = priv->data;
+
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = ahash_def_finup_finish1(areq, err);
+ if (areq->priv)
+ return;
- complete(data, err);
+ areq->base.complete(&areq->base, err);
}
static int ahash_def_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- unsigned long alignmask = crypto_ahash_alignmask(tfm);
- unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
-
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
- return -ENOMEM;
+ int err;
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
+ err = ahash_save_req(req, ahash_def_finup_done1);
+ if (err)
+ return err;
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = ahash_def_finup_done1;
- req->base.data = req;
- req->priv = priv;
+ err = tfm->update(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
- return ahash_def_finup_finish1(req, tfm->update(req));
+ return ahash_def_finup_finish1(req, err);
}
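With the MAY_BACKLOG handling above, both -EINPROGRESS and a backlogged -EBUSY now propagate to the caller, which must wait for the completion callback to learn the final status. A hedged sketch of the calling convention (illustrative; real code would fetch the status its callback recorded):

    static int digest_and_wait(struct ahash_request *req,
    			   struct completion *done)
    {
    	int err = crypto_ahash_digest(req);

    	if (err == -EINPROGRESS || err == -EBUSY) {
    		wait_for_completion(done);
    		err = 0;	/* placeholder for the callback's status */
    	}
    	return err;
    }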
static int ahash_no_export(struct ahash_request *req, void *out)
struct crypto_larval *larval;
int err;
+ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_check_alg(alg);
if (err)
return err;
struct crypto_ablkcipher *ctr = ctx->ctr;
struct {
be128 hash;
- u8 iv[8];
+ u8 iv[16];
struct crypto_gcm_setkey_result result;
# Makefile for the Linux ACPI interpreter
#
-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}
static void ghes_add_timer(struct ghes *ghes)
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
+ synchronize_rcu();
break;
case ACPI_HEST_NOTIFY_NMI:
mutex_lock(&ghes_list_mutex);
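The synchronize_rcu() matters because the ghes_sci list is walked under RCU by the notifier path: after dropping an entry from the list, the caller must wait out current readers before the ghes structure can be freed. The generic pattern, for reference (names here are illustrative):

    list_del_rcu(&entry->list);
    mutex_unlock(&list_mutex);
    synchronize_rcu();	/* wait for in-flight RCU readers */
    kfree(entry);	/* now no reader can still see it */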
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
-
+ cur_state = 0;
for (i = 0; i < list->count; i++) {
struct acpi_power_resource *resource;
acpi_handle handle = list->handles[i];
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
+ if (!hpriv->base)
+ return -ENOMEM;
+
hpriv->base -= SATAHC0_REG_BASE;
#if defined(CONFIG_HAVE_CLK)
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
*
- * Managed dma_alloc_non_coherent(). Memory allocated using this
+ * Managed dma_alloc_noncoherent(). Memory allocated using this
* function will be automatically released on driver detach.
*
* RETURNS:
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
{
struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
int ret, blkindex, blkpos;
- size_t blksize, tmp_dst_len;
+ size_t tmp_dst_len;
void *tmp_dst;
/* index of the compressed lzo block */
blkindex = regcache_lzo_get_blkindex(map, reg);
/* register index within the decompressed block */
blkpos = regcache_lzo_get_blkpos(map, reg);
- /* size of the compressed block */
- blksize = regcache_lzo_get_blksize(map);
lzo_blocks = map->cache;
lzo_block = lzo_blocks[blkindex];
drv);
int err = 0;
+ get_device(dev);
if (adrv->probe)
err = adrv->probe(core);
+ if (err)
+ put_device(dev);
return err;
}
if (adrv->remove)
adrv->remove(core);
+ put_device(dev);
return 0;
}
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
- int result, flags;
+ int result;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
+ struct bio *bio;
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
goto error_out;
}
- if (nbd_cmd(req) == NBD_CMD_WRITE) {
- struct req_iterator iter;
+ if (nbd_cmd(req) != NBD_CMD_WRITE)
+ return 0;
+
+ bio = req->bio;
+ while (bio) {
+ struct bio *next = bio->bi_next;
+ int i;
struct bio_vec *bvec;
- /*
- * we are really probing at internals to determine
- * whether to set MSG_MORE or not...
- */
- rq_for_each_segment(bvec, req, iter) {
- flags = 0;
- if (!rq_iter_last(req, iter))
- flags = MSG_MORE;
+
+ bio_for_each_segment(bvec, bio, i) {
+ bool is_last = !next && i == bio->bi_vcnt - 1;
+ int flags = is_last ? 0 : MSG_MORE;
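+ /* MSG_MORE tells the socket layer that more data follows, so
+  * segments can be coalesced; the last chunk is sent without it. */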
+
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
result = sock_send_bvec(lo, bvec, flags);
result);
goto error_out;
}
+ /*
+ * The completion might already have come in,
+ * so break for the last one instead of letting
+ * the iterator do it. This prevents use-after-free
+ * of the bio.
+ */
+ if (is_last)
+ break;
}
+ bio = next;
}
return 0;
static void make_response(struct xen_blkif *blkif, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings = &blkif->blk_rings;
int notify;
- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&blkif->blk_ring_lock, flags);
/* Place on the response ring for the relevant domain. */
switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_request_discard discard;
} u;
};
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
#pragma pack(pop)
/* x86_64 protocol version */
struct blkif_x86_64_request_discard discard;
} u;
};
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);
union blkif_back_rings {
struct blkif_back_ring native;
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
{ USB_DEVICE(0x04CA, 0x3014) },
+ { USB_DEVICE(0x04CA, 0x3018) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
} else if (!strcmp(str, "auto")) {
parport_nr[0] = LP_PARPORT_AUTO;
} else if (!strcmp(str, "none")) {
- parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ if (parport_ptr < LP_NO)
+ parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ else
+ printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+ str);
} else if (!strcmp(str, "reset")) {
reset = 1;
}
#endif
#ifdef CONFIG_STRICT_DEVMEM
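+/*
+ * devmem_is_allowed() may return 0 (deny access), 1 (allow) or
+ * 2 (restricted: reads return zeroes, writes are dropped).
+ */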
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
+
+ unxlate_dev_mem_ptr(p, ptr);
+ }
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
/* Will read cryptlen */
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
/* Write ICV */
append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
-#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
-#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
- dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+ dg_size = fwnet_get_hdr_dg_size(&hdr);
if (fg_off + len > dg_size)
return 0;
#ifdef CONFIG_ACPI
static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
- bool ret = false;
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
- GOP_VBIOS_CONTENT *vbios;
- VFCT_IMAGE_HEADER *vhdr;
+ unsigned offset;
if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
return false;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
- goto out_unmap;
+ return false;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
- if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
- DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
- goto out_unmap;
- }
+ offset = vfct->VBIOSImageOffset;
- vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
- vhdr = &vbios->VbiosHeader;
- DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
- vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
- vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
-
- if (vhdr->PCIBus != rdev->pdev->bus->number ||
- vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
- vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
- vhdr->VendorID != rdev->pdev->vendor ||
- vhdr->DeviceID != rdev->pdev->device) {
- DRM_INFO("ACPI VFCT table is not for this card\n");
- goto out_unmap;
- };
-
- if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
- DRM_ERROR("ACPI VFCT image truncated\n");
- goto out_unmap;
- }
+ while (offset < tbl_size) {
+ GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
+ VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
- rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
- ret = !!rdev->bios;
+ offset += sizeof(VFCT_IMAGE_HEADER);
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image header truncated\n");
+ return false;
+ }
-out_unmap:
- return ret;
+ offset += vhdr->ImageLength;
+ if (offset > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+ return false;
+ }
+
+ if (vhdr->ImageLength &&
+ vhdr->PCIBus == rdev->pdev->bus->number &&
+ vhdr->PCIDevice == PCI_SLOT(rdev->pdev->devfn) &&
+ vhdr->PCIFunction == PCI_FUNC(rdev->pdev->devfn) &&
+ vhdr->VendorID == rdev->pdev->vendor &&
+ vhdr->DeviceID == rdev->pdev->device) {
+ rdev->bios = kmemdup(&vbios->VbiosContent,
+ vhdr->ImageLength,
+ GFP_KERNEL);
+
+ if (!rdev->bios) {
+ kfree(rdev->bios);
+ return false;
+ }
+ return true;
+ }
+ }
+
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ return false;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
struct ttm_buffer_object *bo;
int ret = -EBUSY;
int put_count;
- uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
while (ret == -EBUSY) {
if (unlikely(ret != 0))
goto out;
- if ((bo->mem.placement & swap_placement) != swap_placement) {
+ if (bo->mem.mem_type != TTM_PL_SYSTEM ||
+ bo->ttm->caching_state != tt_cached) {
struct ttm_mem_reg evict_mem;
evict_mem = bo->mem;
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fence->fman;
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
bool existed;
- ret = ttm_ref_object_add(vmw_fp->tfile, base,
+ ret = ttm_ref_object_add(tfile, base,
TTM_REF_USAGE, &existed);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_event_space:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
break;
}
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
void *bounce;
int ret;
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
/* Ignore report if ErrorRollOver */
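+ /* value[n] - min indexes field->usage[]; the maxusage bound
+  * below keeps malformed reports from reading out of range. */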
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
+ value[n] - min < field->maxusage &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
}
if (field->value[n] >= min && field->value[n] <= max
+ && field->value[n] - min < field->maxusage
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
if (value[n] >= min && value[n] <= max
+ && value[n] - min < field->maxusage
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
return rdesc;
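+ /* *rsize is unsigned: without this check, *rsize - 4 underflows
+  * and the fixup loop below runs far past the descriptor. */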
+ if (*rsize < 4)
+ return rdesc;
+
for (i = 0; i < *rsize - 4; i++)
if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
__u8 tmp;
#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+#define USB_DEVICE_ID_ATEN_CS682 0x2213
+#define USB_DEVICE_ID_ATEN_CS692 0x8021
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
/* See if the hypercall page is already set */
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
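+ /*
+  * Map the hypercall page read-only and executable; the hypervisor
+  * populates it once the hypercall MSR is written, so the kernel
+  * itself never needs a writable (W+X) mapping of it.
+  */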
- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
if (!virtaddr)
goto cleanup;
struct heartbeat_msg_data *heartbeat_msg;
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
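+ /* Drain the channel: more than one heartbeat message may be
+  * queued, so keep reading until nothing is left. */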
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
else
err = atk_read_value_new(sensor, value);
+ if (err)
+ return err;
+
sensor->is_valid = true;
sensor->last_updated = jiffies;
sensor->cached_value = *value;
if (res)
return res;
- val = (val * 10 / 625) * 8;
+ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
mutex_lock(&data->update_lock);
data->temp[attr->index] = val;
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
- INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
i2c_for_each_dev(driver, __process_new_driver);
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
- union i2c_smbus_data temp;
+ union i2c_smbus_data temp = {};
int datasize, res;
if (copy_from_user(&data_arg,
struct iw_cm_conn_param iw_param;
int ret;
+ if (!conn_param)
+ return -EINVAL;
+
ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret)
return ret;
if (!class)
goto out;
if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
- IB_MGMT_MAX_METHODS)
+ ARRAY_SIZE(class->method_table))
goto out;
method = class->method_table[convert_mgmt_class(
mad->mad_hdr.mgmt_class)];
if (status)
process_join_error(group, status);
else {
- ib_find_pkey(group->port->dev->device, group->port->port_num,
- be16_to_cpu(rec->pkey), &pkey_index);
+
+ if (ib_find_pkey(group->port->dev->device,
+ group->port->port_num, be16_to_cpu(rec->pkey),
+ &pkey_index))
+ pkey_index = MCAST_INVALID_PKEY_INDEX;
spin_lock_irq(&group->port->lock);
group->rec = *rec;
container_of(uobj, struct ib_uqp_object, uevent.uobject);
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
- if (qp != qp->real_qp) {
- ib_close_qp(qp);
- } else {
+ if (qp == qp->real_qp)
ib_uverbs_detach_umcast(qp, uqp);
- ib_destroy_qp(qp);
- }
+ ib_destroy_qp(qp);
ib_uverbs_release_uevent(file, &uqp->uevent);
kfree(uqp);
}
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
- goto err_dbmap;
+ goto err_cq_free;
}
return &cq->ibcq;
+err_cq_free:
+ mlx4_cq_free(dev->dev, &cq->mcq);
+
err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;
+ props->max_ah = INT_MAX;
out:
kfree(in_mad);
unsigned long flags;
while (wait) {
- unsigned long shadow;
+ unsigned long shadow = 0;
int cstart, previ = -1;
/*
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
- list_del(&p->list);
+ list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
if (!rtnl_trylock())
return restart_syscall();
+ if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "connected\n")) ||
+ (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
+ !strcmp(buf, "datagram\n"))) {
+ rtnl_unlock();
+ return 0;
+ }
+
/* flush paths if we switch modes so that connections are restarted */
if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
epirq = &interface->endpoint[0].desc;
epout = &interface->endpoint[1].desc;
int error = -ENOMEM;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
int ret, pipe, i;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
DMI_MATCH(DMI_PRODUCT_VERSION, "DL760"),
},
},
+ {
+ /* Dell Embedded Box PC 3000 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ },
+ },
{
/* OQO Model 01 */
.matches = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
+ },
{ }
};
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
},
+ {
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ },
+ },
{ }
};
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+ },
+ },
{ }
};
int error;
int i;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
input_dev = input_allocate_device();
if (!hanwang || !input_dev) {
struct input_dev *input_dev;
int error = -ENOMEM;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
left = (head - next_tail) % iommu->cmd_buf_size;
- if (left <= 2) {
+ if (left <= 0x20) {
struct iommu_cmd sync_cmd;
volatile u64 sem = 0;
int ret;
kfree(dom->aperture[i]);
}
+ if (dom->domain.id)
+ domain_id_free(dom->domain.id);
+
kfree(dom);
}
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
cs->hw.ser->tty = tty;
atomic_set(&cs->hw.ser->refcnt, 1);
init_completion(&cs->hw.ser->dead_cmp);
-
tty->disc_data = cs;
+ /* Set the amount of data we're willing to receive per call
+ * from the hardware driver to half of the input buffer size
+ * to leave some reserve.
+ * Note: We don't do flow control towards the hardware driver.
+ * If more data is received than will fit into the input buffer,
+ * it will be dropped and an error will be logged. This should
+ * never happen as the device is slow and the buffer size ample.
+ */
+ tty->receive_room = RBUFSIZE/2;
+
/* OK.. Initialization of the datastructures and the HW is done.. Now
* startup system and notify the LL that we are ready to run
*/
driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
GIGASET_MODULENAME, GIGASET_DEVNAME,
&ops, THIS_MODULE);
- if (!driver)
+ if (!driver) {
+ rc = -ENOMEM;
goto error;
+ }
rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
if (rc != 0) {
if (!cc->key_size && strcmp(key, "-"))
goto out;
+ /* Clear the flag, since the following operations may invalidate the previously valid key */
+ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
goto out;
- set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
-
r = crypt_setkey_allcpus(cc);
+ if (!r)
+ set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
out:
/* Hex key string not needed after here, so wipe it. */
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = 1;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}
if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
return maxsectors << 9;
}
+/*
+ * In linear_congested() conf->raid_disks is used as a copy of
+ * mddev->raid_disks to iterate conf->disks[]. Because conf->raid_disks
+ * and conf->disks[] are both created in linear_conf(), they are always
+ * consistent with each other, whereas mddev->raid_disks may not be.
+ */
static int linear_congested(void *data, int bits)
{
struct mddev *mddev = data;
rcu_read_lock();
conf = rcu_dereference(mddev->private);
- for (i = 0; i < mddev->raid_disks && !ret ; i++) {
+ for (i = 0; i < conf->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
conf->disks[i-1].end_sector +
conf->disks[i].rdev->sectors;
+ /*
+ * conf->raid_disks is a copy of mddev->raid_disks. The reason to
+ * keep a copy of mddev->raid_disks in struct linear_conf is that
+ * mddev->raid_disks may not match the number of pointers in
+ * conf->disks[] when it is updated in linear_add() and used to
+ * iterate the old conf->disks[] array in linear_congested().
+ * Here conf->raid_disks is always consistent with the number of
+ * pointers in the conf->disks[] array, and mddev->private is
+ * updated with rcu_assign_pointer() in linear_add(), so the race
+ * can be avoided.
+ */
+ conf->raid_disks = raid_disks;
+
return conf;
out:
if (!newconf)
return -ENOMEM;
+ /* newconf->raid_disks already keeps a copy of the increased
+  * value of mddev->raid_disks; WARN_ONCE() is just used to make
+  * sure of this. It is possible that oldconf is still referenced
+  * in linear_congested(), therefore kfree_rcu() is used so that
+  * oldconf is not freed until no one references it anymore.
+ */
oldconf = rcu_dereference(mddev->private);
mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
{
struct rcu_head rcu;
sector_t array_sectors;
+ int raid_disks; /* a copy of mddev->raid_disks */
struct dev_info disks[0];
};
#endif
remove_proc_entry("mdstat", NULL);
for_each_mddev(mddev, tmp) {
export_array(mddev);
+ mddev->ctime = 0;
mddev->hold_active = 0;
+ /*
+ * for_each_mddev() will call mddev_put() at the end of each
+ * iteration. As the mddev is now fully clear, this will
+ * schedule the mddev for destruction by a workqueue, and the
+ * destroy_workqueue() below will wait for that to complete.
+ */
}
destroy_workqueue(md_misc_wq);
destroy_workqueue(md_wq);
config DVB_DM1105
tristate "SDMC DM1105 based PCI cards"
- depends on DVB_CORE && PCI && I2C
+ depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT
select DVB_PLL if !DVB_FE_CUSTOMISE
select DVB_STV0299 if !DVB_FE_CUSTOMISE
select DVB_STV0288 if !DVB_FE_CUSTOMISE
{
struct dvb_usb_device *d = purb->context;
struct dib0700_rc_response *poll_reply;
- u32 uninitialized_var(keycode);
+ u32 keycode;
u8 toggle;
deb_info("%s()\n", __func__);
if ((poll_reply->system == 0x00) && (poll_reply->data == 0x00)
&& (poll_reply->not_data == 0xff)) {
poll_reply->data_state = 2;
- break;
+ rc_repeat(d->rc_dev);
+ goto resubmit;
}
if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- struct hexline hx;
- u8 reset;
- int ret,pos=0;
+ struct hexline *hx;
+ u8 *buf;
+ int ret, pos = 0;
+ u16 cpu_cs_register = cypress[type].cpu_cs_register;
+
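+ /* Keep the hexline on the heap: it is handed to the USB transfer
+  * routines, which must not do DMA on stack memory. */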
+ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ hx = (struct hexline *)buf;
/* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ buf[0] = 1;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
- if (ret != hx.len) {
+ if (ret != hx->len) {
err("error while transferring firmware "
"(transferred size: %d, block size: %d)",
- ret,hx.len);
+ ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
+ kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
- reset = 0;
- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+ buf[0] = 0;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
+ kfree(buf);
+
return ret;
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
if (val >= 7)
*status |= FE_HAS_SYNC;
- if (val >= 8) /* Maybe 9? */
+ /*
+ * The demod actually starts receiving TS in state S8, but the TS
+ * output only reaches its normal state after the transition to S9.
+ */
+ if (val >= 9)
*status |= FE_HAS_LOCK;
dprintk("val = %d, status = 0x%02x\n", val, *status);
kfree(state);
}
+static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
static struct dvb_frontend_ops mb86a20s_ops;
struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
.read_status = mb86a20s_read_status,
.read_signal_strength = mb86a20s_read_signal_strength,
.tune = mb86a20s_tune,
+ .get_frontend_algo = mb86a20s_get_frontend_algo,
};
MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
static int smsusb_sendrequest(void *context, void *buffer, size_t size)
{
struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
- int dummy;
+ struct SmsMsgHdr_ST *phdr;
+ int dummy, ret;
- smsendian_handle_message_header((struct SmsMsgHdr_ST *)buffer);
- return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
- buffer, size, &dummy, 1000);
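+ /* Work on a heap copy: the caller's buffer may live on the stack,
+  * which must not be used for USB DMA, and the endianness fixup
+  * below would otherwise modify the caller's data in place. */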
+ phdr = kmalloc(size, GFP_KERNEL);
+ if (!phdr)
+ return -ENOMEM;
+ memcpy(phdr, buffer, size);
+
+ smsendian_handle_message_header((struct SmsMsgHdr_ST *)phdr);
+ ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+ phdr, size, &dummy, 1000);
+
+ kfree(phdr);
+ return ret;
}
static char *smsusb1_fw_lkup[] = {
for (p = 0; p < entity->num_pads; p++) {
struct media_pad_desc pad;
+
+ memset(&pad, 0, sizeof(pad));
media_device_kpad_to_upad(&entity->pads[p], &pad);
if (copy_to_user(&links.pads[p], &pad, sizeof(pad)))
return -EFAULT;
if (entity->links[l].source->entity != entity)
continue;
+ memset(&link, 0, sizeof(link));
media_device_kpad_to_upad(entity->links[l].source,
&link.source);
media_device_kpad_to_upad(entity->links[l].sink,
if (allowance > ITE_RXDCR_MAX)
allowance = ITE_RXDCR_MAX;
+
+ use_demodulator = true;
}
}
dev->board.agc_analog_digital_select_gpio,
analog_or_digital);
- return status;
+ if (status < 0)
+ return status;
+
+ return 0;
}
int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
.output_mode = OUT_MODE_VIP11,
.demod_xfer_mode = 0,
.ctl_pin_status_mask = 0xFFFFFFC4,
- .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
+ .agc_analog_digital_select_gpio = 0x1c,
.tuner_sif_gpio = -1,
.tuner_scl_gpio = -1,
.tuner_sda_gpio = -1,
break;
case CX231XX_BOARD_CNXT_RDE_253S:
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
break;
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
- errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
+ errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
break;
v4l2_info(&vpfe_dev->v4l2_dev,
"v4l2 sub device %s register fails\n",
sdinfo->name);
+ ret = -ENXIO;
goto probe_sd_out;
}
}
clones[i]);
}
+ put_device(dev);
+
return 0;
}
EXPORT_SYMBOL(mfd_clone_cell);
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
- if (mmc_card_mmc(card) ||
+ if ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
(mmc_card_sd(card) &&
card->scr.cmds & SD_SCR_CMD23_SUPPORT))
md->flags |= MMC_BLK_CMD23;
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
host->base + HW_SSP_BLOCK_SIZE);
}
- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
- (cmd->opcode == SD_IO_RW_EXTENDED))
+ if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
platform_set_drvdata(pdev, mmc);
+ spin_lock_init(&host->lock);
+
ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
if (ret)
goto out_free_dma;
- spin_lock_init(&host->lock);
-
ret = mmc_add_host(mmc);
if (ret)
goto out_free_irq;
return;
}
timeout--;
- mdelay(1);
+ spin_unlock_irq(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock_irq(&host->lock);
}
clk |= SDHCI_CLOCK_CARD_EN;
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
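+ /* Hold a runtime-PM reference while SDIO IRQs are enabled so the
+  * host cannot runtime-suspend while the card may interrupt. */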
+ if (enable)
+ pm_runtime_get_noresume(host->mmc->parent);
+
spin_lock_irqsave(&host->lock, flags);
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!enable)
+ pm_runtime_put_noidle(host->mmc->parent);
}
static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
struct ushc_data *ushc;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
struct ti_hecc_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(dev);
+ int err;
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
struct device *dev = ep->dev->dev.parent;
int i;
+ if (!ep->descs)
+ return;
+
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
dma_addr_t d;
dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
ep->descs_dma_addr);
+ ep->descs = NULL;
}
static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
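+ /* Free the rings only after NAPI is disabled, so the poll
+  * handler cannot touch freed descriptors. */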
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
static void sky2_shutdown(struct pci_dev *pdev)
{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int port;
+
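+ /* Close and detach the ports under RTNL first, so nothing
+  * transmits against the chip once it is powered down. */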
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *ndev = hw->dev[port];
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+ netif_device_detach(ndev);
+ }
+ rtnl_unlock();
+ }
sky2_suspend(&pdev->dev);
pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
pci_set_power_state(pdev, PCI_D3hot);
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
+ int ret;
+
+ down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ ret = mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ ret = mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
+
+ up_read(&mlx4_priv(dev)->cmd.switch_sem);
+ return ret;
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.hcr_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
if (!priv->cmd.context)
return -ENOMEM;
+ down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
--priv->cmd.token_mask;
priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);
down(&priv->cmd.poll_sem);
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
kfree(priv->cmd.context);
up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
if (err)
goto err_put;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_cmpt_put;
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_cmpt_put:
mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[cq->vector].irq);
-
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
+
+ synchronize_irq(priv->eq_table.eq[cq->vector].irq);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
queue_work(mdev->workqueue, &priv->mcast_task);
priv->port_up = true;
+
+ /* Process all outstanding completions to prevent
+ * the queues from freezing when they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i].napi);
+
netif_tx_start_all_queues(dev);
return 0;
ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
if (!buf)
return -ENOMEM;
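+ /* dma_alloc_coherent() is expected to return page-aligned
+  * memory; if it does not, fail gracefully instead of BUG(). */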
+ if (offset_in_page(buf)) {
+ dma_free_coherent(dev, PAGE_SIZE << order,
+ buf, sg_dma_address(mem));
+ return -ENOMEM;
+ }
+
sg_set_buf(mem, buf, PAGE_SIZE << order);
- BUG_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
return 0;
}
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
+#include <linux/rwsem.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
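+ /* Taken for read around each command and for write while
+  * switching between event and polling mode. */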
+ struct rw_semaphore switch_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
if (skb) {
- dev_kfree_skb(skb);
pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
lp->tx_skbs[i].skb = NULL;
lp->tx_skbs[i].skb_dma = 0;
}
*
* Return: Total number of bytes received
*/
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
length = ((ntohl(in_be32(addr +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
+ length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ if (WARN_ON(length > maxlen))
+ length = maxlen;
+
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data);
+ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
size_t linear;
if (q->flags & IFF_VNET_HDR) {
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz);
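+ /* Read the header size once: userspace may change it via ioctl
+  * concurrently, and the check and copy below must agree. */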
err = -EINVAL;
if (len < vnet_hdr_len)
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
- vnet_hdr_len = q->vnet_hdr_sz;
+ vnet_hdr_len = ACCESS_ONCE(q->vnet_hdr_sz);
if ((len -= vnet_hdr_len) < 0)
return -EINVAL;
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
if (tun->flags & TUN_VNET_HDR) {
- if (len < tun->vnet_hdr_sz)
+ int vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
+
+ if (len < vnet_hdr_sz)
return -EINVAL;
- len -= tun->vnet_hdr_sz;
+ len -= vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
if (gso.hdr_len > len)
return -EINVAL;
- offset += tun->vnet_hdr_sz;
+ offset += vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
+ int vnet_hdr_sz = 0;
+
+ if (tun->flags & TUN_VNET_HDR)
+ vnet_hdr_sz = ACCESS_ONCE(tun->vnet_hdr_sz);
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
return -EINVAL;
- if (len < skb->len) {
+ if (len < skb->len + vnet_hdr_sz) {
/* Packet will be striped */
pi.flags |= TUN_PKT_STRIP;
}
total += sizeof(pi);
}
- if (tun->flags & TUN_VNET_HDR) {
+ if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= tun->vnet_hdr_sz) < 0)
+ if ((len -= vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += tun->vnet_hdr_sz;
+ total += vnet_hdr_sz;
}
len = min_t(int, skb->len, len);
struct net_device *netdev;
struct catc *catc;
u8 broadcast[6];
- int i, pktsz;
+ int pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
err("No free urbs available.");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dbg("Checking memory size\n");
- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);
- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
dbg("32k Memory\n");
break;
}
+
+ kfree(buf);
dbg("Getting MAC from SEEROM.");
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}
static void catc_disconnect(struct usb_interface *intf)
{
int len = skb->len;
- if (skb_headroom(skb) < 2) {
- struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
+ if (skb_cow_head(skb, 2)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
skb_push(skb, 2);
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
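+ /* Bounce through a heap buffer: callers may pass stack memory,
+  * which must not be used for USB DMA. */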
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+ indx, 0, buf, size, 500);
+ if (ret > 0 && ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
+ return ret;
}
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+ indx, 0, buf, size, 500);
+ kfree(buf);
+ return ret;
}
static void ctrl_callback(struct urb *urb)
{
u32 tx_cmd_a, tx_cmd_b;
- if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
- struct sk_buff *skb2 =
- skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+ if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);
+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+ return -ENODEV;
+
if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");
priv->rx_ring_size = rx_ring_size;
priv->tx_ring_size = tx_ring_size;
- if (adm8211_alloc_rings(dev)) {
+ err = adm8211_alloc_rings(dev);
+ if (err) {
printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
pci_name(pdev));
goto err_iounmap;
break;
return -EOPNOTSUPP;
default:
- WARN_ON(1);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
mutex_lock(&ah->lock);
}
is_scanning_required = 1;
} else {
- dev_dbg(priv->adapter->dev, "info: trying to associate to %s and bssid %pM\n",
- (char *) req_ssid.ssid, bss->bssid);
+ dev_dbg(priv->adapter->dev, "info: trying to associate to '%.*s' bssid %pM\n",
+ req_ssid.ssid_len, (char *)req_ssid.ssid,
+ bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
}
priv->assoc_request = -EINPROGRESS;
- wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
- (char *) sme->ssid, sme->bssid);
+ wiphy_dbg(wiphy, "info: Trying to associate to %.*s and bssid %pM\n",
+ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
priv->ibss_join_request = -EINPROGRESS;
- wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
- (char *) params->ssid, params->bssid);
+ wiphy_dbg(wiphy, "info: trying to join to %.*s and bssid %pM\n",
+ params->ssid_len, (char *)params->ssid, params->bssid);
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
p += sprintf(p, "multicast_count=\"%d\"\n",
netdev_mc_count(netdev));
- p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
+ p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
+ info.ssid.ssid);
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "region_code = \"%02x\"\n", info.region_code);
{COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
{COUNTRY_CODE_WORLD_WIDE_13, "EC"},
{COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+ {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
};
/*
}
};
+static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_5GHZ_5150_5350,
+ RTL819x_5GHZ_5470_5850,
+ }
+};
+
static const struct ieee80211_regdomain rtl_regdom_14 = {
.n_reg_rules = 3,
.alpha2 = "99",
return &rtl_regdom_no_midband;
case COUNTRY_CODE_IC:
return &rtl_regdom_11;
- case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_TELEC_NETGEAR:
return &rtl_regdom_60_64;
+ case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
+ return &rtl_regdom_12_13_5g_all;
default:
return &rtl_regdom_no_midband;
}
return NULL;
}
+static u8 channel_plan_to_country_code(u8 channelplan)
+{
+ switch (channelplan) {
+ case 0x20:
+ case 0x21:
+ return COUNTRY_CODE_WORLD_WIDE_13;
+ case 0x22:
+ return COUNTRY_CODE_IC;
+ case 0x25:
+ return COUNTRY_CODE_ETSI;
+ case 0x32:
+ return COUNTRY_CODE_TELEC_NETGEAR;
+ case 0x41:
+ return COUNTRY_CODE_GLOBAL_DOMAIN;
+ case 0x7f:
+ return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
+ default:
+ return COUNTRY_CODE_MAX; /* Error */
+ }
+}
+
int rtl_regd_init(struct ieee80211_hw *hw,
int (*reg_notifier) (struct wiphy *wiphy,
struct regulatory_request *request))
return -EINVAL;
/* init country_code from efuse channel plan */
- rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+ rtlpriv->regd.country_code =
+ channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
- rtlpriv->regd.country_code));
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+ (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code));
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
COUNTRY_CODE_GLOBAL_DOMAIN = 10,
COUNTRY_CODE_WORLD_WIDE_13 = 11,
COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
/*add new channel plan above this line */
COUNTRY_CODE_MAX
spin_unlock_irq(&info->tx_lock);
spin_unlock_bh(&info->rx_lock);
+ del_timer_sync(&info->rx_refill_timer);
+
if (info->netdev->irq)
unbind_from_irqhandler(info->netdev->irq, info->netdev);
info->evtchn = info->netdev->irq = 0;
unregister_netdev(info->netdev);
- del_timer_sync(&info->rx_refill_timer);
-
free_percpu(info->stats);
free_netdev(info->netdev);
static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
{
- if (vio_find_node(dn))
+ struct vio_dev *vio_dev;
+
+ vio_dev = vio_find_node(dn);
+ if (vio_dev) {
+ put_device(&vio_dev->dev);
return -EINVAL;
+ }
if (!vio_register_device_node(dn)) {
printk(KERN_ERR
return -EINVAL;
vio_unregister_device(vio_dev);
+
+ put_device(&vio_dev->dev);
+
return 0;
}
if (!dev->pme_support)
return false;
+ /* PME-capable in principle, but not from the intended sleep state */
+ if (!pci_pme_capable(dev, pci_target_state(dev)))
+ return false;
+
while (bus->parent) {
struct pci_dev *bridge = bus->self;
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
DRIVER_NAME, input);
if (error) {
dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
info = tps65910_regs;
+		/* Work around silicon erratum SWCZ010: the output can
+		 * overshoot its programmed voltage level, or the device can
+		 * crash. Workaround: disable synchronization of the DCDC clocks.
+ */
+ tps65910_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
+ DCDCCTRL_DCDCCKSYNC_MASK);
break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
- if (s->len >= cp->view.cols - 5)
+ if (s->len < 4) {
+ /* This indicates a bug, but printing a warning would
+ * cause a deadlock. */
+ return;
+ }
+ if (s->string[s->len - 4] != TO_RA)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
- if (s->len < cp->view.cols - 5) {
+ if (cp->cline->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
- while (--size > cp->cline->len)
+ while (--size >= cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
goto cleanup;
for (i=0; i < MAXMINOR; ++i ) {
- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sys_ser[i].buffer) {
rc = -ENOMEM;
break;
struct qdio_q *q;
int i;
- for_each_input_queue(irq, q, i) {
- if (!references_shared_dsci(irq) &&
- has_multiple_inq_on_dsci(irq))
- xchg(q->irq_ptr->dsci, 0);
+ if (!references_shared_dsci(irq) &&
+ has_multiple_inq_on_dsci(irq))
+ xchg(irq->dsci, 0);
+ for_each_input_queue(irq, q, i) {
if (q->u.in.queue_start_poll) {
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
*
* Debug traces for zfcp.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
* @tag: tag indicating which kind of unsolicited status has been received
* @req: request for which a response was received
*/
-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
struct zfcp_dbf *dbf = req->adapter->dbf;
struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
rec->u.res.req_issued = req->issued;
rec->u.res.prot_status = q_pref->prot_status;
rec->u.res.fsf_status = q_head->fsf_status;
+ rec->u.res.port_handle = q_head->port_handle;
+ rec->u.res.lun_handle = q_head->lun_handle;
memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
FSF_PROT_STATUS_QUAL_SIZE);
rec->pl_len, "fsf_res", req->req_id);
}
- debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
}
if (sdev) {
rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
rec->lun = zfcp_scsi_dev_lun(sdev);
- }
+ } else
+ rec->lun = ZFCP_DBF_INVALID_LUN;
}
/**
/**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
* @tag: identifier for event
* @erp: erp_action running
*/
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
struct zfcp_dbf *dbf = erp->adapter->dbf;
struct zfcp_dbf_rec *rec = &dbf->rec_buf;
else
rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
+ debug_event(dbf->rec, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
+/**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+ zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+ u64 req_id)
+{
+ struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+ struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbf->rec_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ rec->id = ZFCP_DBF_REC_RUN;
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->port_status = wka_port->status;
+ rec->d_id = wka_port->d_id;
+ rec->lun = ZFCP_DBF_INVALID_LUN;
+
+ rec->u.run.fsf_req_id = req_id;
+ rec->u.run.rec_status = ~0;
+ rec->u.run.rec_step = ~0;
+ rec->u.run.rec_action = ~0;
+ rec->u.run.rec_count = ~0;
+
debug_event(dbf->rec, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
static inline
-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
- u64 req_id, u32 d_id)
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+ char *paytag, struct scatterlist *sg, u8 id, u16 len,
+ u64 req_id, u32 d_id, u16 cap_len)
{
struct zfcp_dbf_san *rec = &dbf->san_buf;
u16 rec_len;
unsigned long flags;
+ struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+ u16 pay_sum = 0;
spin_lock_irqsave(&dbf->san_lock, flags);
memset(rec, 0, sizeof(*rec));
rec->id = id;
rec->fsf_req_id = req_id;
rec->d_id = d_id;
- rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
- memcpy(rec->payload, data, rec_len);
memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->pl_len = len; /* full length even if we cap pay below */
+ if (!sg)
+ goto out;
+ rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+ memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+ if (len <= rec_len)
+ goto out; /* skip pay record if full content in rec->payload */
+
+ /* if (len > rec_len):
+ * dump data up to cap_len ignoring small duplicate in rec->payload
+ */
+ spin_lock(&dbf->pay_lock);
+ memset(payload, 0, sizeof(*payload));
+ memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+ payload->fsf_req_id = req_id;
+ payload->counter = 0;
+ for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+ u16 pay_len, offset = 0;
+
+ while (offset < sg->length && pay_sum < cap_len) {
+ pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+ (u16)(sg->length - offset));
+ /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+ memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+ debug_event(dbf->pay, 1, payload,
+ zfcp_dbf_plen(pay_len));
+ payload->counter++;
+ offset += pay_len;
+ pay_sum += pay_len;
+ }
+ }
+ spin_unlock(&dbf->pay_lock);
+out:
debug_event(dbf->san, 1, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->san_lock, flags);
}
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
- length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
- fsf->req_id, d_id);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+ zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+ length, fsf->req_id, d_id, length);
+}
+
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ struct zfcp_fsf_req *fsf,
+ u16 len)
+{
+ struct zfcp_fsf_ct_els *ct_els = fsf->data;
+ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+ struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_gpn_ft_resp *acc;
+ int max_entries, x, last = 0;
+
+ if (!(memcmp(tag, "fsscth2", 7) == 0
+ && ct_els->d_id == FC_FID_DIR_SERV
+ && reqh->ct_rev == FC_CT_REV
+ && reqh->ct_in_id[0] == 0
+ && reqh->ct_in_id[1] == 0
+ && reqh->ct_in_id[2] == 0
+ && reqh->ct_fs_type == FC_FST_DIR
+ && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+ && reqh->ct_options == 0
+ && reqh->_ct_resvd1 == 0
+ && reqh->ct_cmd == FC_NS_GPN_FT
+ /* reqh->ct_mr_size can vary so do not match but read below */
+ && reqh->_ct_resvd2 == 0
+ && reqh->ct_reason == 0
+ && reqh->ct_explan == 0
+ && reqh->ct_vendor == 0
+ && reqn->fn_resvd == 0
+ && reqn->fn_domain_id_scope == 0
+ && reqn->fn_area_id_scope == 0
+ && reqn->fn_fc4_type == FC_TYPE_FCP))
+ return len; /* not GPN_FT response so do not cap */
+
+ acc = sg_virt(resp_entry);
+ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+ * to account for header as 1st pseudo "entry" */;
+
+ /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+ * response, allowing us to skip special handling for it - just skip it
+ */
+ for (x = 1; x < max_entries && !last; x++) {
+ if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+ acc++;
+ else
+ acc = sg_virt(++resp_entry);
+
+ last = acc->fp_flags & FC_NS_FID_LAST;
+ }
+ len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+ return len; /* cap after last entry */
}
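
For scale: struct fc_gpn_ft_resp is 16 bytes, so a ct_mr_size of 1024 four-byte words (an illustrative value only, ct_mr_size varies per request) yields max_entries = 1024 * 4 / 16 + 1 = 257, and a full response caps the traced payload at 257 * 16 = 4112 bytes rather than dumping every scatterlist entry.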
/**
struct zfcp_fsf_ct_els *ct_els = fsf->data;
u16 length;
- length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
- zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
- fsf->req_id, 0);
+ length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+ zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+ length, fsf->req_id, ct_els->d_id,
+ zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}
/**
struct fsf_status_read_buffer *srb =
(struct fsf_status_read_buffer *) fsf->data;
u16 length;
+ struct scatterlist sg;
length = (u16)(srb->length -
offsetof(struct fsf_status_read_buffer, payload));
- zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
- fsf->req_id, ntoh24(srb->d_id));
+ sg_init_one(&sg, srb->payload.data, length);
+ zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+ fsf->req_id, ntoh24(srb->d_id), length);
}
/**
* @sc: pointer to struct scsi_cmnd
* @fsf: pointer to struct zfcp_fsf_req
*/
-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ struct zfcp_fsf_req *fsf)
{
struct zfcp_adapter *adapter =
(struct zfcp_adapter *) sc->device->host->hostdata[0];
}
}
- debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2016
*/
#ifndef ZFCP_DBF_H
#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
+enum zfcp_dbf_pseudo_erp_act_type {
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
/**
* struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
* @ready: number of ready recovery actions
u32 d_id;
#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+ u16 pl_len;
} __packed;
/**
u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
u32 fsf_status;
u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+ u32 port_handle;
+ u32 lun_handle;
} __packed;
/**
struct zfcp_dbf_scsi scsi_buf;
};
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true for an FCP response carrying only a benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+ struct fsf_qtcb *qtcb = req->qtcb;
+ u32 fsf_stat = qtcb->header.fsf_status;
+ struct fcp_resp *fcp_rsp;
+ u8 rsp_flags, fr_status;
+
+ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+ return false; /* not an FCP response */
+ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+ rsp_flags = fcp_rsp->fr_flags;
+ fr_status = fcp_rsp->fr_status;
+ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+ (rsp_flags == FCP_RESID_UNDER) &&
+ (fr_status == SAM_STAT_GOOD);
+}
+
static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
if (level <= req->adapter->dbf->hba->level)
- zfcp_dbf_hba_fsf_res(tag, req);
+ zfcp_dbf_hba_fsf_res(tag, level, req);
}
/**
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
} else if (qtcb->header.fsf_status != FSF_GOOD) {
- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+ zfcp_dbf_hba_fsf_resp("fs_ferr",
+ zfcp_dbf_hba_fsf_resp_suppress(req)
+ ? 5 : 1, req);
} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
(req->fsf_command == FSF_QTCB_OPEN_LUN)) {
scmd->device->host->hostdata[0];
if (level <= adapter->dbf->scsi->level)
- zfcp_dbf_scsi(tag, scmd, req);
+ zfcp_dbf_scsi(tag, level, scmd, req);
}
/**
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
}
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+ struct zfcp_fsf_req *fsf_req)
+{
+ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
#endif /* ZFCP_DBF_H */
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
}
}
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+ unsigned long flags;
+ struct zfcp_adapter *adapter = port->adapter;
+ int port_status;
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct scsi_device *sdev;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ port_status = atomic_read(&port->status);
+ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+ /* new ERP of severity >= port triggered elsewhere meanwhile or
+ * local link down (adapter erp_failed but not clear unblock)
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ spin_lock(shost->host_lock);
+ __shost_for_each_device(sdev, shost) {
+ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+ int lun_status;
+
+ if (zsdev->port != port)
+ continue;
+ /* LUN under port of interest */
+ lun_status = atomic_read(&zsdev->status);
+ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+ continue; /* unblock rport despite failed LUNs */
+ /* LUN recovery not given up yet [maybe follow-up pending] */
+ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+ /* LUN blocked:
+ * not yet unblocked [LUN recovery pending]
+ * or meanwhile blocked [new LUN recovery triggered]
+ */
+ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ return;
+ }
+ }
+ /* now port has no child or all children have completed recovery,
+ * and no ERP of severity >= port was meanwhile triggered elsewhere
+ */
+ zfcp_scsi_schedule_rport_register(port);
+ spin_unlock(shost->host_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
{
struct zfcp_adapter *adapter = act->adapter;
case ZFCP_ERP_ACTION_REOPEN_LUN:
if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
scsi_device_put(sdev);
+ zfcp_erp_try_rport_unblock(port);
break;
case ZFCP_ERP_ACTION_REOPEN_PORT:
- if (result == ZFCP_ERP_SUCCEEDED)
- zfcp_scsi_schedule_rport_register(port);
+ /* This switch case might also happen after a forced reopen
+ * was successfully done and thus overwritten with a new
+ * non-forced reopen at `ersfs_2'. In this case, we must not
+ * do the clean-up of the non-forced version.
+ */
+ if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+ if (result == ZFCP_ERP_SUCCEEDED)
+ zfcp_erp_try_rport_unblock(port);
/* fall through */
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef ZFCP_EXT_H
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+ struct zfcp_erp_action *erp);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+ struct zfcp_fsf_req *);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
*
* Implementation of FSF commands.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
*/
#define KMSG_COMPONENT "zfcp"
fc_host_port_type(shost) = FC_PORTTYPE_PTP;
break;
case FSF_TOPO_FABRIC:
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
if (zfcp_adapter_multi_buffer_active(adapter)) {
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
return -EIO;
+ qtcb->bottom.support.req_buf_length =
+ zfcp_qdio_real_bytes(sg_req);
if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
return -EIO;
+ qtcb->bottom.support.resp_buf_length =
+ zfcp_qdio_real_bytes(sg_resp);
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
zfcp_qdio_sbale_count(sg_req));
req->handler = zfcp_fsf_send_ct_handler;
req->qtcb->header.port_handle = wka_port->handle;
+ ct->d_id = wka_port->d_id;
req->data = ct;
zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
hton24(req->qtcb->bottom.support.d_id, d_id);
req->handler = zfcp_fsf_send_els_handler;
+ els->d_id = d_id;
req->data = els;
zfcp_dbf_san_req("fssels1", req, d_id);
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2016
*/
#ifndef FSF_H
#define FSF_APP_TAG_CHECK_FAILURE 0x00000082
#define FSF_REF_TAG_CHECK_FAILURE 0x00000083
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
+#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
#define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
#define FSF_INVALID_COMMAND_OPTION 0x000000E5
* @handler_data: data passed to handler function
* @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
* @status: used to pass error status to calling function
+ * @d_id: destination ID of the open WKA port for CT, or the D_ID for ELS
*/
struct zfcp_fsf_ct_els {
struct scatterlist *req;
void *handler_data;
struct zfcp_port *port;
int status;
+ u32 d_id;
};
#endif /* FSF_H */
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corporation 2009
+ * Copyright IBM Corp. 2009, 2016
*/
#ifndef ZFCP_REQLIST_H
spin_unlock_irqrestore(&rl->lock, flags);
}
+/**
+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
+ * @rl: the requestlist that contains the target requests.
+ * @f: the function to apply to each request; the first parameter of the
+ * function will be the target-request; the second parameter is the same
+ * pointer as given with the argument @data.
+ * @data: freely chosen argument; passed through to @f as second parameter.
+ *
+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
+ * table (not a 'safe' variant, so don't modify the list).
+ *
+ * Holds @rl->lock over the entire request-iteration.
+ */
+static inline void
+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
+ void (*f)(struct zfcp_fsf_req *, void *), void *data)
+{
+ struct zfcp_fsf_req *req;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&rl->lock, flags);
+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
+ list_for_each_entry(req, &rl->buckets[i], list)
+ f(req, data);
+ spin_unlock_irqrestore(&rl->lock, flags);
+}
+
#endif /* ZFCP_REQLIST_H */
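
A minimal usage sketch of this iterator (the count_req callback and the pending variable are illustrative only; the in-tree caller is the zfcp_scsi_forget_cmnds() hunk further below):

static void count_req(struct zfcp_fsf_req *req, void *data)
{
	(*(unsigned int *)data)++;	/* visit each pending request once */
}

	unsigned int pending = 0;

	/* rl->lock is held across the whole walk, so the count is
	 * consistent with respect to request additions and removals */
	zfcp_reqlist_apply_for_all(adapter->req_list, count_req, &pending);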
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2016
*/
#define KMSG_COMPONENT "zfcp"
}
if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
- /* This could be either
- * open LUN pending: this is temporary, will result in
- * open LUN or ERP_FAILED, so retry command
+ /* This could be
* call to rport_delete pending: mimic retry from
* fc_remote_port_chkready until rport is BLOCKED
*/
return retval;
}
+struct zfcp_scsi_req_filter {
+ u8 tmf_scope;
+ u32 lun_handle;
+ u32 port_handle;
+};
+
+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
+{
+ struct zfcp_scsi_req_filter *filter =
+ (struct zfcp_scsi_req_filter *)data;
+
+ /* already aborted - prevent side-effects - or not a SCSI command */
+ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
+ return;
+
+ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
+ if (old_req->qtcb->header.port_handle != filter->port_handle)
+ return;
+
+ if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
+ old_req->qtcb->header.lun_handle != filter->lun_handle)
+ return;
+
+ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
+ old_req->data = NULL;
+}
+
+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
+{
+ struct zfcp_adapter *adapter = zsdev->port->adapter;
+ struct zfcp_scsi_req_filter filter = {
+ .tmf_scope = FCP_TMF_TGT_RESET,
+ .port_handle = zsdev->port->handle,
+ };
+ unsigned long flags;
+
+ if (tm_flags == FCP_TMF_LUN_RESET) {
+ filter.tmf_scope = FCP_TMF_LUN_RESET;
+ filter.lun_handle = zsdev->lun_handle;
+ }
+
+ /*
+	 * abort_lock guards against concurrent processing - in the abort
+	 * function and the normal command handler - of (struct zfcp_fsf_req *)->data
+ */
+ write_lock_irqsave(&adapter->abort_lock, flags);
+ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
+ &filter);
+ write_unlock_irqrestore(&adapter->abort_lock, flags);
+}
+
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
retval = FAILED;
- } else
+ } else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+ }
zfcp_fsf_req_free(fsf_req);
return retval;
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+ zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
struct fc_rport *rport = port->rport;
if (rport) {
+ zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
void aac_fib_map_free(struct aac_dev *dev)
{
- if (dev->hw_fib_va && dev->max_fib_size) {
- pci_free_consistent(dev->pdev,
- (dev->max_fib_size *
- (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
- dev->hw_fib_va, dev->hw_fib_pa);
- }
+ size_t alloc_size;
+ size_t fib_size;
+ int num_fibs;
+
+ if(!dev->hw_fib_va || !dev->max_fib_size)
+ return;
+
+ num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+ fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ alloc_size = fib_size * num_fibs + ALIGN32 - 1;
+
+ pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va,
+ dev->hw_fib_pa);
+
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
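
The freed size now mirrors the allocation done in fib setup: each FIB is padded with a struct aac_fib_xporthdr and the region carries ALIGN32 - 1 slack bytes for the 32-byte PMC alignment performed below, so free and alloc can no longer disagree about the buffer size.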
if (i<0)
return -ENOMEM;
- /* 32 byte alignment for PMC */
- hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
- dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
- (hw_fib_pa - dev->hw_fib_pa));
- dev->hw_fib_pa = hw_fib_pa;
memset(dev->hw_fib_va, 0,
(dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+ /* 32 byte alignment for PMC */
+ hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+ hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ (hw_fib_pa - dev->hw_fib_pa));
+
/* add Xport header */
- dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
sizeof(struct aac_fib_xporthdr));
- dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+ hw_fib_pa += sizeof(struct aac_fib_xporthdr);
- hw_fib = dev->hw_fib_va;
- hw_fib_pa = dev->hw_fib_pa;
/*
* Initialise the fibs
*/
{
u32 status = src_readl(dev, MUnit.OMR);
+ /*
+ * Check to see if the board panic'd.
+ */
+ if (unlikely(status & KERNEL_PANIC))
+ goto err_blink;
+
/*
* Check to see if the board failed any self tests.
*/
if (unlikely(status & SELF_TEST_FAILED))
- return -1;
+ goto err_out;
/*
- * Check to see if the board panic'd.
+	 *	Check to see if the monitor panic'd.
*/
- if (unlikely(status & KERNEL_PANIC))
- return (status >> 16) & 0xFF;
+ if (unlikely(status & MONITOR_PANIC))
+ goto err_out;
+
/*
* Wait for the adapter to be up and running.
*/
* Everything is OK
*/
return 0;
+
+err_out:
+ return -1;
+
+err_blink:
+ return (status >> 16) & 0xFF;
}
/**
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
struct CommandControlBlock *ccb;
int target = cmd->device->id;
- int lun = cmd->device->lun;
- uint8_t scsicmd = cmd->cmnd[0];
cmd->scsi_done = done;
cmd->host_scribble = NULL;
cmd->result = 0;
- if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
- if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
- cmd->result = (DID_NO_CONNECT << 16);
- }
- cmd->scsi_done(cmd);
- return 0;
- }
if (target == 16) {
/* virtual device for iop message transfer */
arcmsr_handle_virtual_command(acb, cmd);
spin_lock_irqsave(vhost->host->host_lock, flags);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
/* Clean out the queue */
memset(crq->msgs, 0, PAGE_SIZE);
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
};
#define MEGASAS_IS_LOGICAL(scp) \
- (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+ ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
#define MEGASAS_DEV_INDEX(inst, scp) \
((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
goto out_done;
}
- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- /*
- * FW takes care of flush cache on its own
- * No need to send it down
- */
+ /*
+	 * FW takes care of flush cache on its own for a Virtual Disk (VD), so
+	 * don't send SYNCHRONIZE_CACHE down for a VD; for JBOD, pass it to FW.
+ */
+ if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
scmd->result = DID_OK << 16;
goto out_done;
- default:
- break;
}
if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
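
The added parentheses matter because '&&' binds tighter than '?:'; a minimal userspace demonstration (BAD and GOOD are illustrative stand-ins, not driver macros):

#include <stdio.h>

#define BAD(ch)	 (ch) < 8 ? 0 : 1		/* old, unparenthesized shape */
#define GOOD(ch) (((ch) < 8) ? 0 : 1)		/* parenthesized, as in the fix */

int main(void)
{
	int ch = 3;

	/* BAD expands so that (0 && ch < 8) ? 0 : 1 evaluates to 1,
	 * inverting the intended result whenever the left side is false */
	printf("%d\n", 0 && BAD(ch));	/* prints 1 */
	printf("%d\n", 0 && GOOD(ch));	/* prints 0, as intended */
	return 0;
}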
{
u32 tmp;
tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
- if (tmp && 1 << (slot_idx % 32)) {
+ if (tmp & 1 << (slot_idx % 32)) {
mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
1 << (slot_idx % 32));
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
+ vfree(map_storep);
if (dif_storep)
vfree(dif_storep);
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}
struct request_queue *rq = sdev->request_queue;
struct scsi_target *starget = sdev->sdev_target;
- error = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (error)
- return error;
-
error = scsi_target_add(starget);
if (error)
return error;
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > 0xffffffffULL)
+ return false;
+
+ return true;
+}
+
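A worked instance of the new check, assuming 4 KiB logical blocks on a kernel with a 32-bit sector_t (CONFIG_LBDAF unset); the LBA value is an arbitrary example:

	u64 lba = 0x20000000ULL;	/* last LBA reported by the device */
	u64 last_sector = (lba + 1) << (ilog2(4096) - 9);	/* (2^29 + 1) << 3, just over 2^32 */

	/* last_sector exceeds 0xffffffff, so sd_addressable_capacity()
	 * returns false; the old test (lba >= 0xffffffffULL) would have
	 * accepted this device even though its capacity overflows a
	 * 32-bit sector_t once expressed in 512-byte sectors. */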
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
sd_read_protection_type(sdkp, buffer);
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
sg_io_hdr_t *hp;
unsigned char cmnd[MAX_COMMAND_SIZE];
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+ return -EINVAL;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
return k; /* probably out of space --> ENOMEM */
}
if (sdp->detached) {
- if (srp->bio)
+ if (srp->bio) {
blk_end_request_all(srp->rq, -EIO);
+ srp->rq = NULL;
+ }
+
sg_finish_rem_req(srp);
return -ENODEV;
}
iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
len = hp->dxfer_len;
}
+ if (len == 0) {
+ kfree(iov);
+ return -EINVAL;
+ }
res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
iov_count,
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
ssb_printk(KERN_WARNING PFX "WARNING: Using"
" fallback SPROM failed (err %d)\n",
err);
+ goto out_free;
} else {
ssb_dprintk(KERN_DEBUG PFX "Using SPROM"
" revision %d provided by"
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = psb_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
+/* Restrict GSO size to account for NVGRE */
+#define NETVSC_GSO_MAX_SIZE 62768
+
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
	SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
+ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
ret = register_netdev(net);
if (ret != 0) {
mutex_lock(&indio_dev->mlock);
gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
- gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1);
+ gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1);
st->oversampling = lval;
mutex_unlock(&indio_dev->mlock);
struct iio_buffer *ring = indio_dev->buffer;
signed short buf[2];
unsigned char status;
+ int ret;
mutex_lock(&indio_dev->mlock);
if (st->state == AD5933_CTRL_INIT_START_FREQ) {
ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
st->state = AD5933_CTRL_START_SWEEP;
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
- ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
+ if (ret)
+ goto out;
if (status & AD5933_STAT_DATA_VALID) {
- ad5933_i2c_read(st->client,
+ ret = ad5933_i2c_read(st->client,
test_bit(1, ring->scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
ring->scan_count * 2, (u8 *)buf);
+ if (ret)
+ goto out;
if (ring->scan_count == 2) {
buf[0] = be16_to_cpu(buf[0]);
} else {
/* no data available - try again later */
schedule_delayed_work(&st->work, st->poll_time_jiffies);
- mutex_unlock(&indio_dev->mlock);
- return;
+ goto out;
}
if (status & AD5933_STAT_SWEEP_DONE) {
ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
schedule_delayed_work(&st->work, st->poll_time_jiffies);
}
-
+out:
mutex_unlock(&indio_dev->mlock);
}
/* append rx status for mp test packets */
ptr = recvframe_pull(precvframe, (rmv_len -
sizeof(struct ethhdr) + 2) - 24);
+ if (!ptr)
+ return _FAIL;
memcpy(ptr, get_rxmem(precvframe), 24);
ptr += 24;
- } else
+ } else {
ptr = recvframe_pull(precvframe, (rmv_len -
sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
+ if (!ptr)
+ return _FAIL;
+ }
memcpy(ptr, pattrib->dst, ETH_ALEN);
memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
/*
* Required for gPXE iSCSI boot client
*/
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
- kfree(tpg);
return -ENOMEM;
}
long temperature;
int ret;
- ret = tz->ops->get_trip_temp(tz, 0, &temperature);
+ ret = tz->ops->get_crit_temp(tz, &temperature);
if (ret)
return ret;
#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
- struct n_hdlc_buf *link;
+ struct list_head list_item;
int count;
char buf[1];
};
#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
struct n_hdlc_buf_list {
- struct n_hdlc_buf *head;
- struct n_hdlc_buf *tail;
+ struct list_head list;
int count;
spinlock_t spinlock;
};
* @backup_tty - TTY to use if tty gets closed
* @tbusy - reentrancy flag for tx wakeup code
* @woke_up - FIXME: describe this field
- * @tbuf - currently transmitting tx buffer
* @tx_buf_list - list of pending transmit frame buffers
* @rx_buf_list - list of received frame buffers
 * @tx_free_buf_list - list of unused transmit frame buffers
struct tty_struct *backup_tty;
int tbusy;
int woke_up;
- struct n_hdlc_buf *tbuf;
struct n_hdlc_buf_list tx_buf_list;
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
/*
* HDLC buffer list manipulation functions
*/
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
{
struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
struct n_hdlc_buf *buf;
- unsigned long flags;
while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
- if (n_hdlc->tbuf) {
- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
- n_hdlc->tbuf = NULL;
- }
- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
}
static struct tty_ldisc_ops n_hdlc_ldisc = {
} else
break;
}
- kfree(n_hdlc->tbuf);
kfree(n_hdlc);
} /* end of n_hdlc_release() */
n_hdlc->woke_up = 0;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
- /* get current transmit buffer or get new transmit */
- /* buffer from list of pending transmit buffers */
-
- tbuf = n_hdlc->tbuf;
- if (!tbuf)
- tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
-
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
while (tbuf) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)sending frame %p, count=%d\n",
/* rollback was possible and has been done */
if (actual == -ERESTARTSYS) {
- n_hdlc->tbuf = tbuf;
+ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
/* if transmit error, throw frame away by */
/* free current transmit buffer */
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
-
- /* this tx buffer is done */
- n_hdlc->tbuf = NULL;
-
+
		/* wake up sleeping writers */
wake_up_interruptible(&tty->write_wait);
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)frame %p pending\n",
__FILE__,__LINE__,tbuf);
-
- /* buffer not accepted by driver */
- /* set this buffer as pending buffer */
- n_hdlc->tbuf = tbuf;
+
+ /*
+ * the buffer was not accepted by driver,
+ * return it back into tx queue
+ */
+ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
}
int error = 0;
int count;
unsigned long flags;
-
+ struct n_hdlc_buf *buf = NULL;
+
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
__FILE__,__LINE__,cmd);
/* report count of read data available */
/* in next available frame (if any) */
spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
- if (n_hdlc->rx_buf_list.head)
- count = n_hdlc->rx_buf_list.head->count;
+ buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
+ struct n_hdlc_buf, list_item);
+ if (buf)
+ count = buf->count;
else
count = 0;
spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
count = tty_chars_in_buffer(tty);
/* add size of next output frame in queue */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
- if (n_hdlc->tx_buf_list.head)
- count += n_hdlc->tx_buf_list.head->count;
+ buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
+ struct n_hdlc_buf, list_item);
+ if (buf)
+ count += buf->count;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
error = put_user(count, (int __user *)arg);
break;
poll_wait(filp, &tty->write_wait, wait);
/* set bits for operations that won't block */
- if (n_hdlc->rx_buf_list.head)
+ if (!list_empty(&n_hdlc->rx_buf_list.list))
mask |= POLLIN | POLLRDNORM; /* readable */
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= POLLHUP;
if (tty_hung_up_p(filp))
mask |= POLLHUP;
if (!tty_is_writelocked(tty) &&
- n_hdlc->tx_free_buf_list.head)
+ !list_empty(&n_hdlc->tx_free_buf_list.list))
mask |= POLLOUT | POLLWRNORM; /* writable */
}
return mask;
memset(n_hdlc, 0, sizeof(*n_hdlc));
- n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
- n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
- n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
- n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
-
+ spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
+ spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
+ spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
+ spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
+
+ INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
+
/* allocate free rx buffer list */
for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
} /* end of n_hdlc_alloc() */
/**
- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
- * @list - pointer to buffer list
+ * n_hdlc_buf_return - put the HDLC buffer back at the head of the specified list
+ * @buf_list - pointer to the buffer list
+ * @buf - pointer to the buffer
*/
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf)
{
- memset(list, 0, sizeof(*list));
- spin_lock_init(&list->spinlock);
-} /* end of n_hdlc_buf_list_init() */
+ unsigned long flags;
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ list_add(&buf->list_item, &buf_list->list);
+ buf_list->count++;
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
+}
/**
* n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
- * @list - pointer to buffer list
+ * @buf_list - pointer to buffer list
* @buf - pointer to buffer
*/
-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
{
unsigned long flags;
- spin_lock_irqsave(&list->spinlock,flags);
-
- buf->link=NULL;
- if (list->tail)
- list->tail->link = buf;
- else
- list->head = buf;
- list->tail = buf;
- (list->count)++;
-
- spin_unlock_irqrestore(&list->spinlock,flags);
-
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ list_add_tail(&buf->list_item, &buf_list->list);
+ buf_list->count++;
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
} /* end of n_hdlc_buf_put() */
/**
* n_hdlc_buf_get - remove and return an HDLC buffer from list
- * @list - pointer to HDLC buffer list
+ * @buf_list - pointer to HDLC buffer list
*
* Remove and return an HDLC buffer from the head of the specified HDLC buffer
* list.
* Returns a pointer to HDLC buffer if available, otherwise %NULL.
*/
-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
+static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
{
unsigned long flags;
struct n_hdlc_buf *buf;
- spin_lock_irqsave(&list->spinlock,flags);
-
- buf = list->head;
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ buf = list_first_entry_or_null(&buf_list->list,
+ struct n_hdlc_buf, list_item);
if (buf) {
- list->head = buf->link;
- (list->count)--;
+ list_del(&buf->list_item);
+ buf_list->count--;
}
- if (!list->head)
- list->tail = NULL;
-
- spin_unlock_irqrestore(&list->spinlock,flags);
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
return buf;
-
} /* end of n_hdlc_buf_get() */
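
Note the asymmetry between the two enqueue paths: n_hdlc_buf_return() uses list_add() to push a rejected frame back onto the head of the queue, while n_hdlc_buf_put() appends with list_add_tail(). A frame the driver could not accept is therefore retried before any newer frames, preserving the FIFO ordering that the removed tbuf side channel used to guarantee.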
static char hdlc_banner[] __initdata =
pbn_b0_4_1152000,
+ pbn_b0_4_1250000,
+
pbn_b0_2_1843200,
pbn_b0_4_1843200,
.uart_offset = 8,
},
+ [pbn_b0_4_1250000] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 1250000,
+ .uart_offset = 8,
+ },
+
[pbn_b0_2_1843200] = {
.flags = FL_BASE0,
.num_ports = 2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_brcm_trumanage },
+ /* MKS Tenta SCOM-080x serial cards */
+ { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
+ { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
+
/*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
{ .compatible = "qcom,msm-uart" },
{}
};
+MODULE_DEVICE_TABLE(of, msm_match_table);
static struct platform_driver msm_platform_driver = {
.remove = msm_serial_remove,
#define AUART_CTRL2_TXE (1 << 8)
#define AUART_CTRL2_UARTEN (1 << 0)
+#define AUART_LINECTRL_BAUD_DIV_MAX 0x003fffc0
+#define AUART_LINECTRL_BAUD_DIV_MIN 0x000000ec
#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16
#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000
#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16)
struct ktermios *old)
{
u32 bm, ctrl, ctrl2, div;
- unsigned int cflag, baud;
+ unsigned int cflag, baud, baud_min, baud_max;
cflag = termios->c_cflag;
ctrl2 &= ~AUART_CTRL2_CTSEN;
/* set baud rate */
- baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
- div = u->uartclk * 32 / baud;
+ baud_min = DIV_ROUND_UP(u->uartclk * 32, AUART_LINECTRL_BAUD_DIV_MAX);
+ baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
+ baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
+ div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
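
To put the new bounds in numbers, assuming the usual 24 MHz uartclk (an assumption; the clock is board-specific): baud_min = DIV_ROUND_UP(24000000 * 32, 0x003fffc0) = 184 and baud_max = 24000000 * 32 / 0xec = 3254237, so a requested rate is clamped into the range the divisor field can actually encode. At 115200 baud, div = DIV_ROUND_CLOSEST(768000000, 115200) = 6667, giving DIVINT = 6667 >> 6 = 104 and DIVFRAC = 6667 & 0x3F = 11.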
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
- .evbit = { BIT_MASK(EV_KEY) },
- .keybit = { BIT_MASK(KEY_LEFTALT) },
+ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
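
The designated initializers matter on 32-bit builds: KEY_LEFTALT is 56, so there BIT_WORD(56) = 1 and BIT_MASK(56) = 1 << 24. The old initializer stored that mask in keybit[0], where it described a different key entirely; on 64-bit builds BIT_WORD(56) = 0, which is why the bug went unnoticed.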
* they are not on hot paths so a little discipline won't do
* any harm.
*
+ * The line discipline-related tty_struct fields are reset to
+ * prevent the ldisc driver from re-using stale information for
+ * the new ldisc instance.
+ *
* Locking: takes termios_mutex
*/
mutex_lock(&tty->termios_mutex);
tty->termios->c_line = num;
mutex_unlock(&tty->termios_mutex);
+
+ tty->disc_data = NULL;
+ tty->receive_room = 0;
}
/**
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (CON_IS_VISIBLE(vc))
update_screen(vc);
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
{ USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
+ { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
/* Motorola H24 HSPA module: */
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = kmalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
if (!data) {
dev_err(&intf->dev, "Unable to allocate kernel memory\n");
return -ENOMEM;
}
}
+ if (!data->bulk_out || !data->bulk_in) {
+ dev_err(&intf->dev, "bulk endpoints not found\n");
+ retcode = -ENODEV;
+ goto err_put;
+ }
+
retcode = get_capabilities(data);
if (retcode)
dev_err(&intf->dev, "can't read capabilities\n");
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+err_put:
kref_put(&data->kref, usbtmc_delete);
return retcode;
}
if (ifp->desc.bNumEndpoints >= num_ep)
goto skip_to_next_endpoint_or_interface_descriptor;
+ /* Check for duplicate endpoint addresses */
+ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
+ if (ifp->endpoint[i].desc.bEndpointAddress ==
+ d->bEndpointAddress) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum, d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
+ }
+
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
/*
* Adjust bInterval for quirked devices.
+ */
+ /*
+ * This quirk fixes bIntervals reported in ms.
+ */
+ if (to_usb_device(ddev)->quirks &
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+ n = clamp(fls(d->bInterval) + 3, i, j);
+ i = j = n;
+ }
+ /*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* WORLDE easy key (easykey.25) MIDI controller */
+ { USB_DEVICE(0x0218, 0x0401), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* HP 5300/5370C scanner */
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Baum Vario Ultra */
+ { USB_DEVICE(0x0904, 0x6101), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6102), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6103), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
	memset(&params, 0x00, sizeof(params));
if (value) {
+ if (dep->flags & DWC3_EP_STALL)
+ return 0;
+
if (dep->number == 0 || dep->number == 1) {
/*
* Whenever EP0 is stalled, we will restart
else
dep->flags |= DWC3_EP_STALL;
} else {
+ if (!(dep->flags & DWC3_EP_STALL))
+ return 0;
+
ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
if (ret)
#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
/* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
#define DWC3_DEPCFG_BULK_BASED (1 << 30)
#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
/* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
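
The point of the new masks is to keep an oversized value from spilling into the neighbouring bit-field; a minimal userspace illustration (the field layout is taken from the macros above, and 0x1f is an arbitrary out-of-range example):

#include <stdio.h>
#include <stdint.h>

#define BURST_UNMASKED(n)	((n) << 22)		/* old macro shape */
#define BURST_MASKED(n)		(((n) & 0xf) << 22)	/* field is bits 22..25 */

int main(void)
{
	uint32_t bad  = BURST_UNMASKED(0x1f);	/* 0x07c00000: bit 26 leaks out */
	uint32_t good = BURST_MASKED(0x1f);	/* 0x03c00000: stays in field */

	/* in the unmasked case, bit 26 would corrupt DATA_SEQ_NUM */
	printf("unmasked %08x vs masked %08x\n", bad, good);
	return 0;
}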
ep_found:
/* commit results */
- _ep->maxpacket = usb_endpoint_maxp(chosen_desc);
+ _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
_ep->desc = chosen_desc;
_ep->comp_desc = NULL;
_ep->maxburst = 0;
value = min(w_length, (u16) 1);
break;
- /* function drivers must handle get/set altsetting; if there's
- * no get() method, we know only altsetting zero works.
- */
+ /* function drivers must handle get/set altsetting */
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE)
goto unknown;
f = cdev->config->interface[intf];
if (!f)
break;
- if (w_value && !f->set_alt)
+
+ /*
+ * If there's no get_alt() method, we know only altsetting zero
+ * works. There is no need to check if set_alt() is not NULL
+ * as we check this in usb_add_function().
+ */
+ if (w_value && !f->get_alt)
break;
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
static void
stop_activity (struct dummy *dum)
{
- struct dummy_ep *ep;
+ int i;
/* prevent any more requests */
dum->address = 0;
/* The timer is left running so that outstanding URBs can fail */
/* nuke any pending requests first, so driver i/o is quiesced */
- list_for_each_entry (ep, &dum->gadget.ep_list, ep.ep_list)
- nuke (dum, ep);
+ for (i = 0; i < DUMMY_ENDPOINTS; ++i)
+ nuke(dum, &dum->ep[i]);
/* driver now does any non-usb quiescing necessary */
}
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
DBG(cdev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
/* recv report */
char *set_report_buff;
unsigned short set_report_length;
- spinlock_t spinlock;
+ spinlock_t read_spinlock;
wait_queue_head_t read_queue;
/* send report */
- struct mutex lock;
+ spinlock_t write_spinlock;
bool write_pending;
wait_queue_head_t write_queue;
struct usb_request *req;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
#define READ_COND (hidg->set_report_buff != NULL)
while (!READ_COND) {
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(hidg->read_queue, READ_COND))
return -ERESTARTSYS;
- spin_lock_irqsave(&hidg->spinlock, flags);
+ spin_lock_irqsave(&hidg->read_spinlock, flags);
}
tmp_buff = hidg->set_report_buff;
hidg->set_report_buff = NULL;
- spin_unlock_irqrestore(&hidg->spinlock, flags);
+ spin_unlock_irqrestore(&hidg->read_spinlock, flags);
if (tmp_buff != NULL) {
/* copy to user outside spinlock */
static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
+ unsigned long flags;
if (req->status != 0) {
ERROR(hidg->func.config->cdev,
"End Point Request ERROR: %d\n", req->status);
}
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
wake_up(&hidg->write_queue);
}
size_t count, loff_t *offp)
{
struct f_hidg *hidg = file->private_data;
+ unsigned long flags;
ssize_t status = -ENOMEM;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
- mutex_lock(&hidg->lock);
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
#define WRITE_COND (!hidg->write_pending)
/* write queue */
while (!WRITE_COND) {
- mutex_unlock(&hidg->lock);
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
hidg->write_queue, WRITE_COND))
return -ERESTARTSYS;
- mutex_lock(&hidg->lock);
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
}
+ hidg->write_pending = 1;
count = min_t(unsigned, count, hidg->report_length);
+
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
status = copy_from_user(hidg->req->buf, buffer, count);
if (status != 0) {
ERROR(hidg->func.config->cdev,
"copy_from_user error\n");
- mutex_unlock(&hidg->lock);
- return -EINVAL;
+ status = -EINVAL;
+ goto release_write_pending;
}
hidg->req->status = 0;
hidg->req->length = count;
hidg->req->complete = f_hidg_req_complete;
hidg->req->context = hidg;
- hidg->write_pending = 1;
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
if (status < 0) {
ERROR(hidg->func.config->cdev,
"usb_ep_queue error on int endpoint %zd\n", status);
- hidg->write_pending = 0;
- wake_up(&hidg->write_queue);
+ goto release_write_pending;
} else {
status = count;
}
- mutex_unlock(&hidg->lock);
+ return status;
+release_write_pending:
+ spin_lock_irqsave(&hidg->write_spinlock, flags);
+ hidg->write_pending = 0;
+ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
+ wake_up(&hidg->write_queue);
return status;
}
return;
}
- spin_lock(&hidg->spinlock);
+ spin_lock(&hidg->read_spinlock);
hidg->set_report_buff = krealloc(hidg->set_report_buff,
req->actual, GFP_ATOMIC);
if (hidg->set_report_buff == NULL) {
- spin_unlock(&hidg->spinlock);
+ spin_unlock(&hidg->read_spinlock);
return;
}
hidg->set_report_length = req->actual;
memcpy(hidg->set_report_buff, req->buf, req->actual);
- spin_unlock(&hidg->spinlock);
+ spin_unlock(&hidg->read_spinlock);
wake_up(&hidg->read_queue);
}
goto fail;
}
- mutex_init(&hidg->lock);
- spin_lock_init(&hidg->spinlock);
+ spin_lock_init(&hidg->write_spinlock);
+ spin_lock_init(&hidg->read_spinlock);
init_waitqueue_head(&hidg->write_queue);
init_waitqueue_head(&hidg->read_queue);
/* data and/or status stage for control request */
} else if (dev->state == STATE_DEV_SETUP) {
- /* IN DATA+STATUS caller makes len <= wLength */
+ len = min_t(size_t, len, dev->setup_wLength);
if (dev->setup_in) {
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
* such as configuration notifications.
*/
-static int is_valid_config (struct usb_config_descriptor *config)
+static int is_valid_config(struct usb_config_descriptor *config,
+ unsigned int total)
{
return config->bDescriptorType == USB_DT_CONFIG
&& config->bLength == USB_DT_CONFIG_SIZE
+ && total >= USB_DT_CONFIG_SIZE
&& config->bConfigurationValue != 0
&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
u32 tag;
char *kbuf;
- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
+ (len > PAGE_SIZE * 4))
return -EINVAL;
/* we might need to change message format someday */
/* full or low speed config */
dev->config = (void *) kbuf;
total = le16_to_cpu(dev->config->wTotalLength);
- if (!is_valid_config (dev->config) || total >= length)
+ if (!is_valid_config(dev->config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
if (kbuf [1] == USB_DT_CONFIG) {
dev->hs_config = (void *) kbuf;
total = le16_to_cpu(dev->hs_config->wTotalLength);
- if (!is_valid_config (dev->hs_config) || total >= length)
+ if (!is_valid_config(dev->hs_config, total) ||
+ total > length - USB_DT_DEVICE_SIZE)
goto fail;
kbuf += total;
length -= total;
+ } else {
+ dev->hs_config = NULL;
}
/* could support multiple configs, using another encoding! */
req->length = length;
- /* throttle high/super speed IRQ rate back slightly */
- if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
- ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
- : 0;
-
retval = usb_ep_queue(in, req, GFP_ATOMIC);
switch (retval) {
default:
if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
uhci->wait_for_hp = 1;
+ /* Intel controllers use non-PME wakeup signalling */
+ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+ device_set_run_wake(uhci_dev(uhci), 1);
+
/* Set up pointers to PCI-specific functions */
uhci->reset_hc = uhci_pci_reset_hc;
uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
xhci->devs[slot_id] = NULL;
}
+/*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free the child first. Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_virt_device *vdev;
+ struct list_head *tt_list_head;
+ struct xhci_tt_bw_info *tt_info, *next;
+ int i;
+
+ vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
+
+ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* is this a hub device that added a tt_info to the tts list? */
+ if (tt_info->slot_id == slot_id) {
+ /* are any devices using this tt_info? */
+ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ vdev = xhci->devs[i];
+ if (vdev && (vdev->tt_info == tt_info))
+ xhci_free_virt_devices_depth_first(
+ xhci, i);
+ }
+ }
+ }
+ /* we are now at a leaf device */
+ xhci_free_virt_device(xhci, slot_id);
+}
+
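For illustration: if the device in slot 3 is a hub whose tt_info is
referenced by devices in slots 5 and 7, the walk above recurses into
slots 5 and 7 and frees them before slot 3 itself, so no child is ever
left holding a pointer to a freed tt_info.
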
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
}
}
- for (i = 1; i < MAX_HC_SLOTS; ++i)
- xhci_free_virt_device(xhci, i);
+ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ xhci_free_virt_devices_depth_first(xhci, i);
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
* "physically contiguous and 64-byte (cache line) aligned".
*/
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
- GFP_KERNEL);
+ flags);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->erst.entries = dma_alloc_coherent(dev,
sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
- GFP_KERNEL);
+ flags);
if (!xhci->erst.entries)
goto fail;
xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.\n");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
"exiting.\n");
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
+ struct xhci_virt_device *vdev;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
/* Make sure the URB hasn't completed or been unlinked already */
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (ret || !urb->hcpriv)
+ if (ret)
goto done;
+
+ /* give back URB now if we can't queue it for cancel */
+ vdev = xhci->devs[urb->dev->slot_id];
+ urb_priv = urb->hcpriv;
+ if (!vdev || !urb_priv)
+ goto err_giveback;
+
+ xhci_dbg(xhci, "Cancel URB %p\n", urb);
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_ring(xhci, xhci->event_ring);
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ ep = &vdev->eps[ep_index];
+ ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ep || !ep_ring)
+ goto err_giveback;
+
+ xhci_dbg(xhci, "Endpoint ring:\n");
+ xhci_debug_ring(xhci, ep_ring);
+
temp = xhci_readl(xhci, &xhci->op_regs->status);
if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "HW died, freeing TD.\n");
- urb_priv = urb->hcpriv;
for (i = urb_priv->td_cnt;
- i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+ i < urb_priv->length;
i++) {
td = urb_priv->td[i];
if (!list_empty(&td->td_list))
if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
}
-
- usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&xhci->lock, flags);
- usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
- xhci_urb_free_priv(xhci, urb_priv);
- return ret;
- }
- if ((xhci->xhc_state & XHCI_STATE_DYING) ||
- (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.\n",
- urb->ep->desc.bEndpointAddress, urb);
- /* Let the stop endpoint command watchdog timer (which set this
- * state) finish cleaning up the endpoint TD lists. We must
- * have caught it in the middle of dropping a lock and giving
- * back an URB.
- */
- goto done;
- }
-
- xhci_dbg(xhci, "Cancel URB %p\n", urb);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
- ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
- ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
- if (!ep_ring) {
- ret = -EINVAL;
- goto done;
+ goto err_giveback;
}
- xhci_dbg(xhci, "Endpoint ring:\n");
- xhci_debug_ring(xhci, ep_ring);
-
- urb_priv = urb->hcpriv;
-
for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
done:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
+
+err_giveback:
+ if (urb_priv)
+ xhci_urb_free_priv(xhci, urb_priv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
+ return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
- if (iface_desc->desc.bNumEndpoints < 1) {
- dev_err(&interface->dev, "Invalid number of endpoints\n");
- retval = -EINVAL;
- goto error;
- }
-
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
/* this one will match for the IOWarrior56 only */
dev->int_out_endpoint = endpoint;
}
+
+ if (!dev->int_in_endpoint) {
+ dev_err(&interface->dev, "no interrupt-in endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+ if (!dev->int_out_endpoint) {
+ dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+ }
+
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
- /* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
-
- if (retval) {
- /* something prevented us from registering this driver */
- err ("Not able to get a minor for this device.");
- usb_set_intfdata (interface, NULL);
- goto error;
- }
- dev->minor = interface->minor;
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
- "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
- USB_MAJOR, dev->minor);
-
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
+ /* we can register the device now, as it is ready */
+ usb_set_intfdata (interface, dev);
+
+ retval = usb_register_dev (interface, &tower_class);
+
+ if (retval) {
+ /* something prevented us from registering this driver */
+ err ("Not able to get a minor for this device.");
+ usb_set_intfdata (interface, NULL);
+ goto error;
+ }
+ dev->minor = interface->minor;
+
+ /* let the user know what node this device is now attached to */
+ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+ "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+ USB_MAJOR, dev->minor);
exit:
dbg(2, "%s: leave, return value 0x%.8lx (dev)", __func__, (long) dev);
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 3) {
+ usb_put_dev(usbdev);
+ return -ENODEV;
+ }
+
/*
* Allocate parport interface
*/
void __iomem *base;
u8 channel_count;
u8 used_channels;
- u8 irq;
+ int irq;
};
usb_rcvctrlpipe(serial->dev, 0),
0xfe, 0xc0, 0, reg,
buf, 1, ARK_TIMEOUT);
- if (result < 0)
+ if (result < 1) {
+ dev_err(&serial->interface->dev,
+ "failed to read register %u: %d\n",
+ reg, result);
+ if (result >= 0)
+ result = -EIO;
+
return result;
- else
- return buf[0];
+ }
+
+ return buf[0];
}
static inline int calc_divisor(int bps)
if (result) {
dbg("%s - usb_serial_generic_open failed: %d",
__func__, result);
- goto err_out;
+ goto err_free;
}
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
/* read modem status */
- priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
+ result = ark3116_read_reg(serial, UART_MSR, buf);
+ if (result < 0)
+ goto err_close;
+ priv->msr = *buf;
+
/* read line status */
- priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
+ result = ark3116_read_reg(serial, UART_LSR, buf);
+ if (result < 0)
+ goto err_close;
+ priv->lsr = *buf;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "submit irq_in urb failed %d\n",
result);
- ark3116_close(port);
- goto err_out;
+ goto err_close;
}
/* activate interrupts */
if (tty)
ark3116_set_termios(tty, port, NULL);
-err_out:
kfree(buf);
+
+ return 0;
+
+err_close:
+ usb_serial_generic_close(port);
+err_free:
+ kfree(buf);
+
return result;
}
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
+#define CH341_REQ_READ_VERSION 0x5F
#define CH341_REQ_WRITE_REG 0x9A
#define CH341_REQ_READ_REG 0x95
-#define CH341_REG_BREAK1 0x05
-#define CH341_REG_BREAK2 0x18
-#define CH341_NBREAK_BITS_REG1 0x01
-#define CH341_NBREAK_BITS_REG2 0x40
-
+#define CH341_REQ_SERIAL_INIT 0xA1
+#define CH341_REQ_MODEM_CTRL 0xA4
+
+#define CH341_REG_BREAK 0x05
+#define CH341_REG_LCR 0x18
+#define CH341_NBREAK_BITS 0x01
+
+#define CH341_LCR_ENABLE_RX 0x80
+#define CH341_LCR_ENABLE_TX 0x40
+#define CH341_LCR_MARK_SPACE 0x20
+#define CH341_LCR_PAR_EVEN 0x10
+#define CH341_LCR_ENABLE_PAR 0x08
+#define CH341_LCR_STOP_BITS_2 0x04
+#define CH341_LCR_CS8 0x03
+#define CH341_LCR_CS7 0x02
+#define CH341_LCR_CS6 0x01
+#define CH341_LCR_CS5 0x00
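
The LCR bits above combine into a single register value per line
setting. As a hypothetical illustration (these convenience macros are
not part of the patch, and assume CH341_LCR_PAR_EVEN selects even
parity when parity is enabled, as in later mainline code):

    /* 8 data bits, no parity, 1 stop bit: 0x80 | 0x40 | 0x03 = 0xc3 */
    #define CH341_LCR_8N1   (CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | \
                             CH341_LCR_CS8)

    /* 7 data bits, even parity, 1 stop bit:
     * 0x80 | 0x40 | 0x10 | 0x08 | 0x02 = 0xda
     */
    #define CH341_LCR_7E1   (CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | \
                             CH341_LCR_PAR_EVEN | CH341_LCR_ENABLE_PAR | \
                             CH341_LCR_CS7)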
static int debug;
u8 multi_status_change; /* status changed multiple since last call */
};
+static void ch341_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ struct ktermios *old_termios);
+
static int ch341_control_out(struct usb_device *dev, u8 request,
u16 value, u16 index)
{
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
+ if (r < 0)
+ dev_err(&dev->dev, "failed to send control message: %d\n", r);
return r;
}
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- return r;
+ if (r < bufsize) {
+ if (r >= 0) {
+ dev_err(&dev->dev,
+ "short control message received (%d < %u)\n",
+ r, bufsize);
+ r = -EIO;
+ }
+
+ dev_err(&dev->dev, "failed to receive control message: %d\n",
+ r);
+ return r;
+ }
+
+ return 0;
}
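
The rewritten ch341_control_in() applies an idiom that recurs through
this series (ark3116, ftdi_sio, mct_u232, ssu100, kl5kusb105):
usb_control_msg() returns the number of bytes actually transferred, so
a short control-IN read must be converted into an explicit error rather
than treated as success. A self-contained sketch of the idiom, with a
hypothetical helper name:

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/usb.h>

    /* Read exactly len bytes via a vendor control request, else fail. */
    static int control_read_exact(struct usb_device *udev, u8 request,
                                  u16 value, u16 index, void *buf, u16 len)
    {
            int r;

            r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                                USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                USB_DIR_IN,
                                value, index, buf, len, 1000);
            if (r < len)            /* short read or negative error code */
                    return r < 0 ? r : -EIO;

            return 0;
    }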
-static int ch341_set_baudrate(struct usb_device *dev,
- struct ch341_private *priv)
+static int ch341_set_baudrate_lcr(struct usb_device *dev,
+ struct ch341_private *priv, u8 lcr)
{
- short a, b;
+ short a;
int r;
unsigned long factor;
short divisor;
factor = 0x10000 - factor;
a = (factor & 0xff00) | divisor;
- b = factor & 0xff;
- r = ch341_control_out(dev, 0x9a, 0x1312, a);
- if (!r)
- r = ch341_control_out(dev, 0x9a, 0x0f2c, b);
+ /*
+ * CH341A buffers data until a full endpoint-size packet (32 bytes)
+ * has been received unless bit 7 is set.
+ */
+ a |= BIT(7);
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ if (r)
+ return r;
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
+ if (r)
+ return r;
return r;
}
static int ch341_set_handshake(struct usb_device *dev, u8 control)
{
dbg("ch341_set_handshake(0x%02x)", control);
- return ch341_control_out(dev, 0xa4, ~control, 0);
+ return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0);
}
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
unsigned long flags;
dbg("ch341_get_status()");
if (!buffer)
return -ENOMEM;
- r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size);
if (r < 0)
goto out;
- /* setup the private status if available */
- if (r == 2) {
- r = 0;
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
- priv->multi_status_change = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
- } else
- r = -EPROTO;
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+ priv->multi_status_change = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
out: kfree(buffer);
return r;
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
dbg("ch341_configure()");
return -ENOMEM;
/* expect two bytes 0x27 0x00 */
- r = ch341_control_in(dev, 0x5f, 0, 0, buffer, size);
+ r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r < 0)
goto out;
- r = ch341_control_out(dev, 0xa1, 0, 0);
- if (r < 0)
- goto out;
-
- r = ch341_set_baudrate(dev, priv);
+ r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
goto out;
/* expect two bytes 0x56 0x00 */
- r = ch341_control_in(dev, 0x95, 0x2518, 0, buffer, size);
- if (r < 0)
- goto out;
-
- r = ch341_control_out(dev, 0x9a, 0x2518, 0x0050);
+ r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x2518, 0, buffer, size);
if (r < 0)
goto out;
- /* expect 0xff 0xee */
- r = ch341_get_status(dev, priv);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, 0x0050);
if (r < 0)
goto out;
- r = ch341_control_out(dev, 0xa1, 0x501f, 0xd90a);
- if (r < 0)
- goto out;
-
- r = ch341_set_baudrate(dev, priv);
+ r = ch341_set_baudrate_lcr(dev, priv, 0);
if (r < 0)
goto out;
r = ch341_set_handshake(dev, priv->line_control);
- if (r < 0)
- goto out;
-
- /* expect 0x9f 0xee */
- r = ch341_get_status(dev, priv);
out: kfree(buffer);
return r;
spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(serial->dev, priv);
if (r < 0)
dbg("ch341_open()");
- priv->baud_rate = DEFAULT_BAUD_RATE;
-
- r = ch341_configure(serial->dev, priv);
- if (r)
- goto out;
-
- r = ch341_set_handshake(serial->dev, priv->line_control);
- if (r)
- goto out;
-
- r = ch341_set_baudrate(serial->dev, priv);
- if (r)
- goto out;
+ if (tty)
+ ch341_set_termios(tty, port, NULL);
dbg("%s - submitting interrupt urb", __func__);
port->interrupt_in_urb->dev = serial->dev;
if (r) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, r);
- ch341_close(port);
- return -EPROTO;
+ return r;
+ }
+
+ r = ch341_get_status(port->serial->dev, priv);
+ if (r < 0) {
+ dev_err(&port->dev, "failed to read modem status: %d\n", r);
+ goto err_kill_interrupt_urb;
}
r = usb_serial_generic_open(tty, port);
+ if (r)
+ goto err_kill_interrupt_urb;
+
+ return 0;
-out: return r;
+err_kill_interrupt_urb:
+ usb_kill_urb(port->interrupt_in_urb);
+
+ return r;
}
/* Old_termios contains the original termios settings and
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned baud_rate;
unsigned long flags;
+ unsigned char ctrl;
+ int r;
+
+ /* redundant changes may cause the chip to lose bytes */
+ if (old_termios && !tty_termios_hw_change(tty->termios, old_termios))
+ return;
dbg("ch341_set_termios()");
baud_rate = tty_get_baud_rate(tty);
- priv->baud_rate = baud_rate;
+ ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8;
if (baud_rate) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
- ch341_set_baudrate(port->serial->dev, priv);
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
+ priv->baud_rate = baud_rate;
+
+ r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl);
+ if (r < 0 && old_termios) {
+ priv->baud_rate = tty_termios_baud_rate(old_termios);
+ tty_termios_copy_hw(tty->termios, old_termios);
+ }
}
- ch341_set_handshake(port->serial->dev, priv->line_control);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (C_BAUD(tty) == B0)
+ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+ spin_unlock_irqrestore(&priv->lock, flags);
/* Unimplemented:
* (cflag & CSIZE) : data bits [5, 8]
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
{
const uint16_t ch341_break_reg =
- CH341_REG_BREAK1 | ((uint16_t) CH341_REG_BREAK2 << 8);
+ ((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
__func__, break_reg[0], break_reg[1]);
if (break_state != 0) {
dbg("%s - Enter break state requested", __func__);
- break_reg[0] &= ~CH341_NBREAK_BITS_REG1;
- break_reg[1] &= ~CH341_NBREAK_BITS_REG2;
+ break_reg[0] &= ~CH341_NBREAK_BITS;
+ break_reg[1] &= ~CH341_LCR_ENABLE_TX;
} else {
dbg("%s - Leave break state requested", __func__);
- break_reg[0] |= CH341_NBREAK_BITS_REG1;
- break_reg[1] |= CH341_NBREAK_BITS_REG2;
+ break_reg[0] |= CH341_NBREAK_BITS;
+ break_reg[1] |= CH341_LCR_ENABLE_TX;
}
dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
static int ch341_reset_resume(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_serial *serial = NULL;
- struct ch341_private *priv;
-
- serial = usb_get_intfdata(intf);
- priv = usb_get_serial_port_data(serial->port[0]);
+ struct usb_serial *serial = usb_get_intfdata(intf);
+ struct usb_serial_port *port = serial->port[0];
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ int ret;
/*reconfigure ch341 serial port after bus-reset*/
ch341_configure(dev, priv);
- usb_serial_resume(intf);
+ if (port->port.flags & ASYNC_INITIALIZED) {
+ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ ret);
+ return ret;
+ }
- return 0;
+ ret = ch341_get_status(port->serial->dev, priv);
+ if (ret < 0) {
+ dev_err(&port->dev, "failed to read modem status: %d\n",
+ ret);
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
}
static struct usb_driver ch341_driver = {
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
+ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
+ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
dbg("%s", __func__);
+ if (serial->num_bulk_out < serial->num_ports)
+ return -ENODEV;
+
/* allocate the private data structure */
priv = kmalloc(sizeof(struct cyberjack_private), GFP_KERNEL);
if (!priv)
struct usb_serial_port *port = urb->context;
struct tty_struct *tty;
struct digi_port *priv = usb_get_serial_port_data(port);
- int opcode = ((unsigned char *)urb->transfer_buffer)[0];
- int len = ((unsigned char *)urb->transfer_buffer)[1];
- int port_status = ((unsigned char *)urb->transfer_buffer)[2];
- unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
+ unsigned char *buf = urb->transfer_buffer;
+ int opcode;
+ int len;
+ int port_status;
+ unsigned char *data;
int flag, throttled;
- int status = urb->status;
-
- /* do not process callbacks on closed ports */
- /* but do continue the read chain */
- if (urb->status == -ENOENT)
- return 0;
/* short/multiple packet check */
+ if (urb->actual_length < 2) {
+ dev_warn(&port->dev, "short packet received\n");
+ return -1;
+ }
+
+ opcode = buf[0];
+ len = buf[1];
+
if (urb->actual_length != len + 2) {
- dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
- "status=%d, port=%d, opcode=%d, len=%d, "
- "actual_length=%d, status=%d\n", __func__, status,
- priv->dp_port_num, opcode, len, urb->actual_length,
- port_status);
+ dev_err(&port->dev, "malformed packet received: port=%d, opcode=%d, len=%d, actual_length=%u\n",
+ priv->dp_port_num, opcode, len, urb->actual_length);
+ return -1;
+ }
+
+ if (opcode == DIGI_CMD_RECEIVE_DATA && len < 1) {
+ dev_err(&port->dev, "malformed data packet received\n");
return -1;
}
/* receive data */
if (tty && opcode == DIGI_CMD_RECEIVE_DATA) {
+ port_status = buf[2];
+ data = &buf[3];
+
/* get flag from port_status */
flag = 0;
struct usb_serial *serial = port->serial;
struct tty_struct *tty;
struct digi_port *priv = usb_get_serial_port_data(port);
+ unsigned char *buf = urb->transfer_buffer;
int opcode, line, status, val;
int i;
unsigned int rts;
dbg("digi_read_oob_callback: port=%d, len=%d",
priv->dp_port_num, urb->actual_length);
+ if (urb->actual_length < 4)
+ return -1;
+
/* handle each oob command */
- for (i = 0; i < urb->actual_length - 3;) {
- opcode = ((unsigned char *)urb->transfer_buffer)[i++];
- line = ((unsigned char *)urb->transfer_buffer)[i++];
- status = ((unsigned char *)urb->transfer_buffer)[i++];
- val = ((unsigned char *)urb->transfer_buffer)[i++];
+ for (i = 0; i < urb->actual_length - 3; i += 4) {
+ opcode = buf[i];
+ line = buf[i + 1];
+ status = buf[i + 2];
+ val = buf[i + 3];
dbg("digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d",
opcode, line, status, val);
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
0, priv->interface,
buf, 1, WDR_TIMEOUT);
- if (rv < 0)
+ if (rv < 1) {
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
- else
+ if (rv >= 0)
+ rv = -EIO;
+ } else {
priv->latency = buf[0];
+ }
kfree(buf);
mutex_init(&priv->cfg_lock);
memset(&priv->icount, 0x00, sizeof(priv->icount));
- priv->flags = ASYNC_LOW_LATENCY;
-
if (quirk && quirk->port_probe)
quirk->port_probe(priv);
priv->prev_status = status;
}
+ /* save if the transmitter is empty or not */
+ if (packet[1] & FTDI_RS_TEMT)
+ priv->transmit_empty = 1;
+ else
+ priv->transmit_empty = 0;
+
+ len -= 2;
+ if (!len)
+ return 0; /* status only */
+
+ /*
+ * Break and error status must only be processed for packets with
+ * data payload to avoid over-reporting.
+ */
flag = TTY_NORMAL;
if (packet[1] & FTDI_RS_ERR_MASK) {
/* Break takes precedence over parity, which takes precedence
}
}
- /* save if the transmitter is empty or not */
- if (packet[1] & FTDI_RS_TEMT)
- priv->transmit_empty = 1;
- else
- priv->transmit_empty = 0;
-
- len -= 2;
- if (!len)
- return 0; /* status only */
priv->icount.rx += len;
ch = packet + 2;
FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
0, priv->interface,
buf, len, WDR_TIMEOUT);
- if (ret < 0)
+
+ /* NOTE: We allow short responses and handle that below. */
+ if (ret < 1) {
+ if (ret >= 0)
+ ret = -EIO;
goto out;
+ }
ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
(buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
#define ATMEL_VID 0x03eb /* Vendor ID */
#define STK541_PID 0x2109 /* Zigbee Controller */
+/*
+ * Texas Instruments
+ */
+#define TI_VID 0x0451
+#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
+
/*
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
+ kfree(buffer);
}
/* we are done with this urb, so let the host driver
int result;
struct usb_serial *serial = ep->serial;
struct edgeport_product_info *product_info = &ep->product_info;
- struct edge_compatibility_descriptor *epic = &ep->epic_descriptor;
+ struct edge_compatibility_descriptor *epic;
struct edge_compatibility_bits *bits;
ep->is_epic = 0;
+
+ epic = kmalloc(sizeof(*epic), GFP_KERNEL);
+ if (!epic)
+ return -ENOMEM;
+
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQUEST_ION_GET_EPIC_DESC,
0xC0, 0x00, 0x00,
- &ep->epic_descriptor,
- sizeof(struct edge_compatibility_descriptor),
+ epic, sizeof(*epic),
300);
-
dbg("%s result = %d", __func__, result);
- if (result > 0) {
+ if (result == sizeof(*epic)) {
ep->is_epic = 1;
+ memcpy(&ep->epic_descriptor, epic, sizeof(*epic));
memset(product_info, 0, sizeof(struct edgeport_product_info));
product_info->NumPorts = epic->NumPorts;
dbg(" IOSPWriteLCR : %s", bits->IOSPWriteLCR ? "TRUE": "FALSE");
dbg(" IOSPSetBaudRate : %s", bits->IOSPSetBaudRate ? "TRUE": "FALSE");
dbg(" TrueEdgeport : %s", bits->TrueEdgeport ? "TRUE": "FALSE");
+
+ result = 0;
+ } else if (result >= 0) {
+ dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n",
+ result);
+ result = -EIO;
}
+ kfree(epic);
+
return result;
}
* rom_read
* reads a number of bytes from the Edgeport device starting at the given
* address.
- * If successful returns the number of bytes read, otherwise it returns
- * a negative error number of the problem.
+ * Returns zero on success or a negative error number.
****************************************************************************/
static int rom_read(struct usb_serial *serial, __u16 extAddr,
__u16 addr, __u16 length, __u8 *data)
USB_REQUEST_ION_READ_ROM,
0xC0, addr, extAddr, transfer_buffer,
current_length, 300);
- if (result < 0)
+ if (result < current_length) {
+ if (result >= 0)
+ result = -EIO;
break;
+ }
memcpy(data, transfer_buffer, current_length);
length -= current_length;
addr += current_length;
data += current_length;
+
+ result = 0;
}
kfree(transfer_buffer);
EDGE_MANUF_DESC_LEN,
(__u8 *)(&edge_serial->manuf_descriptor));
- if (response < 1)
+ if (response < 0) {
dev_err(&edge_serial->serial->dev->dev,
- "error in getting manufacturer descriptor\n");
- else {
+ "error in getting manufacturer descriptor: %d\n",
+ response);
+ } else {
char string[30];
dbg("**Manufacturer Descriptor");
dbg(" RomSize: %dK",
EDGE_BOOT_DESC_LEN,
(__u8 *)(&edge_serial->boot_descriptor));
- if (response < 1)
+ if (response < 0) {
dev_err(&edge_serial->serial->dev->dev,
- "error in getting boot descriptor\n");
- else {
+ "error in getting boot descriptor: %d\n",
+ response);
+ } else {
dbg("**Boot Descriptor:");
dbg(" BootCodeLength: %d",
le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength));
EDGE_COMPATIBILITY_MASK1,
EDGE_COMPATIBILITY_MASK2 };
+ if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
dev = serial->dev;
/* create our private serial structure */
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
/* Read the epic descriptor */
- if (get_epic_descriptor(edge_serial) <= 0) {
+ if (get_epic_descriptor(edge_serial) < 0) {
/* memcpy descriptor to Supports structures */
memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
sizeof(struct edge_compatibility_bits));
dbg("%s - STAYING IN BOOT MODE", __func__);
serial->product_info.TiMode = TI_MODE_BOOT;
- return 0;
+ return 1;
}
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dbg("%s - port_number %d, function %d, info 0x%x",
__func__, port_number, function, data[1]);
+
+ if (port_number >= edge_serial->serial->num_ports) {
+ dev_err(&urb->dev->dev, "bad port number %d\n", port_number);
+ goto exit;
+ }
+
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
port_number = edge_port->port->number - edge_port->port->serial->minor;
- if (edge_port->lsr_event) {
+ if (urb->actual_length > 0 && edge_port->lsr_event) {
edge_port->lsr_event = 0;
dbg("%s ===== Port %u LSR Status = %02x, Data = %02x ======",
__func__, port_number, edge_port->lsr_mask, *data);
dev = serial->dev;
+ /* Make sure we have the required endpoints when in download mode. */
+ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports)
+ return -ENODEV;
+ }
+
/* create our private serial structure */
edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
if (edge_serial == NULL) {
usb_set_serial_data(serial, edge_serial);
status = download_fw(edge_serial);
- if (status) {
+ if (status < 0) {
kfree(edge_serial);
return status;
}
+ if (status > 0)
+ return 1; /* bind but do not register any ports */
+
/* set up our port private structures */
for (i = 0; i < serial->num_ports; ++i) {
edge_port = kzalloc(sizeof(struct edgeport_port), GFP_KERNEL);
for (i = 0; i < serial->num_ports; ++i) {
edge_port = usb_get_serial_port_data(serial->port[i]);
+ if (!edge_port)
+ continue;
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
}
static int iuu_startup(struct usb_serial *serial)
{
+ unsigned char num_ports = serial->num_ports;
struct iuu_private *priv;
+
+ if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
+ return -ENODEV;
+
priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL);
dbg("%s- priv allocation success", __func__);
if (!priv)
struct usb_serial_port *port = urb->context;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
+ unsigned int len = urb->actual_length;
int retval;
int status = urb->status;
struct keyspan_pda_private *priv;
goto exit;
}
+ if (len < 1) {
+ dev_warn(&port->dev, "short message received\n");
+ goto exit;
+ }
+
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
tty = tty_port_tty_get(&port->port);
/* rest of message is rx data */
- if (tty && urb->actual_length) {
- tty_insert_flip_string(tty, data + 1,
- urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- }
+ if (!tty || len < 2)
+ break;
+ tty_insert_flip_string(tty, data + 1, len - 1);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
break;
case 1:
/* status interrupt */
+ if (len < 3) {
+ dev_warn(&port->dev, "short interrupt message received\n");
+ break;
+ }
dbg(" rx int, d1=%d, d2=%d", data[1], data[2]);
switch (data[1]) {
case 1: /* modemline change */
static int keyspan_pda_startup(struct usb_serial *serial)
{
-
+ unsigned char num_ports = serial->num_ports;
struct keyspan_pda_private *priv;
+ if (serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
/* allocate the private data structures for all ports. Well, for all
one ports. */
status_buf, KLSI_STATUSBUF_LEN,
10000
);
- if (rc < 0)
- dev_err(&port->dev, "Reading line status failed (error = %d)\n",
- rc);
- else {
+ if (rc != KLSI_STATUSBUF_LEN) {
+ dev_err(&port->dev, "reading line status failed: %d\n", rc);
+ if (rc >= 0)
+ rc = -EIO;
+ } else {
status = get_unaligned_le16(status_buf);
dev_info(&port->serial->dev->dev, "read status %x %x",
rc = usb_serial_generic_open(tty, port);
if (rc) {
retval = rc;
- goto exit;
+ goto err_free_cfg;
}
rc = usb_control_msg(port->serial->dev,
if (rc < 0) {
dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
retval = rc;
+ goto err_generic_close;
} else
dbg("%s - enabled reading", __func__);
rc = klsi_105_get_line_state(port, &line_state);
- if (rc >= 0) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_state = line_state;
- spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - read line state 0x%lx", __func__, line_state);
- retval = 0;
- } else
+ if (rc < 0) {
retval = rc;
+ goto err_disable_read;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_state = line_state;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
+ line_state);
-exit:
+ return 0;
+
+err_disable_read:
+ usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ KL5KUSB105A_SIO_CONFIGURE,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
+ 0, /* index */
+ NULL, 0,
+ KLSI_TIMEOUT);
+err_generic_close:
+ usb_serial_generic_close(port);
+err_free_cfg:
kfree(cfg);
+
return retval;
}
struct usb_host_interface *altsetting;
struct usb_host_endpoint *endpoint;
+ if (serial->num_interrupt_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
+ return -ENODEV;
+ }
+
priv = kmalloc(sizeof(struct kobil_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
MCT_U232_GET_REQUEST_TYPE,
0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
- if (rc < 0) {
+ if (rc < MCT_U232_GET_MODEM_STAT_SIZE) {
dev_err(&serial->dev->dev,
"Get MODEM STATus failed (error = %d)\n", rc);
+
+ if (rc >= 0)
+ rc = -EIO;
+
*msr = 0;
} else {
*msr = buf[0];
static int debug;
-static struct usb_serial_driver moschip7720_2port_driver;
-
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
tty_kref_put(tty);
}
-/*
- * mos77xx_probe
- * this function installs the appropriate read interrupt endpoint callback
- * depending on whether the device is a 7720 or 7715, thus avoiding costly
- * run-time checks in the high-frequency callback routine itself.
- */
-static int mos77xx_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
- if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
- moschip7720_2port_driver.read_int_callback =
- mos7715_interrupt_callback;
- else
- moschip7720_2port_driver.read_int_callback =
- mos7720_interrupt_callback;
-
- return 0;
-}
-
static int mos77xx_calc_num_ports(struct usb_serial *serial)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
return -ENODEV;
}
+ if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing bulk endpoints\n");
+ return -ENODEV;
+ }
+
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
tmp->interrupt_in_endpointAddress;
serial->port[1]->interrupt_in_urb = NULL;
serial->port[1]->interrupt_in_buffer = NULL;
+
+ if (serial->port[0]->interrupt_in_urb) {
+ struct urb *urb = serial->port[0]->interrupt_in_urb;
+
+ urb->complete = mos7715_interrupt_callback;
+ }
}
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
- /* start the interrupt urb */
- ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
- if (ret_val)
- dev_err(&dev->dev,
- "%s - Error %d submitting control urb\n",
- __func__, ret_val);
-
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
if (product == MOSCHIP_DEVICE_ID_7715) {
ret_val = mos7715_parport_init(serial);
return ret_val;
}
#endif
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+ if (ret_val) {
+ dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
+ ret_val);
+ }
+
/* LSR For Port 1 */
read_mos_reg(serial, 0, LSR, &data);
dbg("LSR:%x", data);
{
int i;
+ usb_kill_urb(serial->port[0]->interrupt_in_urb);
+
#ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
/* close the parallel port */
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
- .probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
.ioctl = mos7720_ioctl,
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
- .read_int_callback = NULL /* dynamically assigned in probe() */
+ .read_int_callback = mos7720_interrupt_callback,
};
static int __init moschip7720_init(void)
* (can't set it up in mos7840_startup as the structures *
* were not set up at that time.) */
if (port0->open_ports == 1) {
+ /* FIXME: Buffer never NULL, so URB is not submitted. */
if (serial->port[0]->interrupt_in_buffer == NULL) {
/* set up interrupt urb */
usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
serial,
serial->port[0]->interrupt_in_urb->interval);
- /* start interrupt read for mos7840 *
- * will continue as long as mos7840 is connected */
-
+ /* start interrupt read for mos7840 */
response =
usb_submit_urb(serial->port[0]->interrupt_in_urb,
GFP_KERNEL);
return -1;
}
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports ||
+ serial->num_interrupt_in < 1) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
dev = serial->dev;
dbg("%s", "Entering...");
struct omninet_data *od;
struct usb_serial_port *port = serial->port[0];
+ /* The second bulk-out endpoint is used for writing. */
+ if (serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
od = kmalloc(sizeof(struct omninet_data), GFP_KERNEL);
if (!od) {
dev_err(&port->dev, "%s- kmalloc(%Zd) failed.\n",
static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- struct usb_serial_port *wport;
int result = 0;
dbg("%s - port %d", __func__, port->number);
- wport = serial->port[1];
- tty_port_tty_set(&wport->port, tty);
-
/* Start reading from the device */
usb_fill_bulk_urb(port->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
#define BANDRICH_PRODUCT_1012 0x1012
#define QUALCOMM_VENDOR_ID 0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20 0x9003
+#define QUECTEL_PRODUCT_UC15 0x9090
+
+#define QUECTEL_VENDOR_ID 0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EC25 0x0125
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG1 0x1040
+#define TELIT_PRODUCT_LE922_USBCFG2 0x1041
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE920 0x1200
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ /* Quectel products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Quectel products using Quectel vendor ID */
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+ .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
static int oti6858_startup(struct usb_serial *serial)
{
struct usb_serial_port *port = serial->port[0];
+ unsigned char num_ports = serial->num_ports;
struct oti6858_private *priv;
int i;
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
for (i = 0; i < serial->num_ports; ++i) {
priv = kzalloc(sizeof(struct oti6858_private), GFP_KERNEL);
if (!priv)
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_private *priv;
+ unsigned char num_ports = serial->num_ports;
enum pl2303_type type = type_0;
unsigned char *buf;
int i;
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports ||
+ serial->num_interrupt_in < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
buf = kmalloc(10, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
#define IODATA_PRODUCT_ID 0x0a03
if (!safe)
goto out;
+ if (length < 2) {
+ dev_err(&port->dev, "malformed packet\n");
+ return;
+ }
+
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
int i;
enum spcp8x5_type type = SPCP825_007_TYPE;
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ unsigned char num_ports = serial->num_ports;
+
+ if (serial->num_bulk_in < num_ports ||
+ serial->num_bulk_out < num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
if (product == 0x0201)
type = SPCP825_007_TYPE;
static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_DEVICE, 0xc0, 0, 0,
- data, 3, 300);
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_DEVICE, 0xc0, 0, 0,
+ data, 3, 300);
+ if (ret < 3) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ return ret;
}
static inline int ssu100_getregister(struct usb_device *dev,
unsigned short reg,
u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_REGISTER, 0xc0, reg,
- uart, data, sizeof(*data), 300);
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0xc0, reg,
+ uart, data, sizeof(*data), 300);
+ if (ret < sizeof(*data)) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+ return ret;
}
QT_OPEN_CLOSE_CHANNEL,
QT_TRANSFER_IN, 0x01,
0, data, 2, 300);
- if (result < 0) {
+ if (result < 2) {
dbg("%s - open failed %i", __func__, result);
+ if (result >= 0)
+ result = -EIO;
kfree(data);
return result;
}
goto free_tdev;
}
+ if (serial->num_bulk_in < serial->num_ports ||
+ serial->num_bulk_out < serial->num_ports) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ status = -ENODEV;
+ goto free_tdev;
+ }
+
/* set up port structures */
for (i = 0; i < serial->num_ports; ++i) {
tport = kzalloc(sizeof(struct ti_port), GFP_KERNEL);
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
value, moduleid, data, size, 1000);
- if (status == size)
- status = 0;
-
- if (status > 0)
- status = -ECOMM;
+ if (status < 0)
+ return status;
- return status;
+ return 0;
}
if (status == size)
status = 0;
-
- if (status > 0)
+ else if (status >= 0)
status = -ECOMM;
return status;
/* COMMAND STAGE */
/* let's send the command via the control pipe */
+ /*
+ * The command is sometimes (e.g. after scsi_eh_prep_cmnd) located on
+ * the stack, and the stack may be vmalloc'ed, so no DMA for us. Make
+ * a copy.
+ */
+ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
- us->ifnum, srb->cmnd, srb->cmd_len);
+ us->ifnum, us->iobuf, srb->cmd_len);
/* check the return code for the command */
US_DEBUGP("Call to usb_stor_ctrl_transfer() returned %d\n", result);
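
The copy above exists because buffers handed to the USB core may be
used for DMA, and DMA to or from stack (or vmalloc'ed) memory is not
permitted; the transport therefore bounces the SCSI command through the
preallocated, DMA-safe us->iobuf. A generic sketch of the same bounce
pattern, with hypothetical names (my_dev, my_dev_submit):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct my_dev;                          /* hypothetical device type */
    int my_dev_submit(struct my_dev *dev, void *buf, size_t len);

    /* Bounce a possibly-on-stack command through DMA-safe heap memory. */
    static int send_command_dma_safe(struct my_dev *dev, const u8 *cmd,
                                     size_t len)
    {
            u8 *bounce;
            int ret;

            bounce = kmemdup(cmd, len, GFP_KERNEL);
            if (!bounce)
                    return -ENOMEM;

            ret = my_dev_submit(dev, bounce, len);
            kfree(bounce);

            return ret;
    }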
int result;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
struct hwarc *hwarc;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
result);
}
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
struct uwb_rc *rc = NULL;
dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
if (dev) {
rc = dev_get_drvdata(dev);
__uwb_rc_get(rc);
+ put_device(dev);
}
+
return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
find_rc_grandpa);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
+
return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
find_rc_dev);
- if (dev)
+ if (dev) {
rc = dev_get_drvdata(dev);
+ put_device(dev);
+ }
return rc;
}
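
All four hunks above fix the same reference-count leak:
class_find_device() returns its match with a reference held, so the
caller must drop it with put_device() once the driver data has been
extracted (taking its own reference first, as __uwb_rc_try_get() does,
if the data must outlive the lookup). A sketch of the corrected
pattern, with hypothetical names:

    #include <linux/device.h>

    struct my_ctx;                          /* hypothetical context type */

    static struct my_ctx *my_ctx_lookup(struct class *cls, void *key,
                                        int (*match)(struct device *, void *))
    {
            struct device *dev;
            struct my_ctx *ctx = NULL;

            dev = class_find_device(cls, NULL, key, match);
            if (dev) {
                    ctx = dev_get_drvdata(dev);
                    put_device(dev);        /* drop the lookup reference */
            }

            return ctx;
    }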
return 1;
if (regno < 16) {
- red >>= 8;
- green >>= 8;
- blue >>= 8;
+ red >>= 16 - info->var.red.length;
+ green >>= 16 - info->var.green.length;
+ blue >>= 16 - info->var.blue.length;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
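
A worked example of the new clamping (illustrative values): copying
from a cmap with start = 10, len = 20 into one with start = 16,
len = 256 gives fromoff = 6, tooff = 0 and
size = min_t(size_t, 256 - 0, 20 - 6) = 14 entries (28 bytes). A source
that ends before the destination begins now fails the
fromoff >= from->len test and returns -EINVAL, and the unsigned size_t
arithmetic prevents a crafted cmap with a huge len from overflowing the
old signed int computation.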
break;
case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);
if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
all_vm_events(events);
si_meminfo(&i);
+#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
pages_to_bytes(events[PSWPOUT]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later.
*/
+ update_balloon_stats(vb);
+
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
BUG();
return true; /* already a holder */
else if (bdev->bd_holder != NULL)
return false; /* held by someone else */
- else if (bdev->bd_contains == bdev)
+ else if (whole == bdev)
return true; /* is a whole device which isn't held */
else if (whole->bd_holder == bd_may_claim)
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ /*
+ * These all access 32-bit values anyway so no further
+ * handling is necessary.
+ */
switch (cmd) {
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
- default:
- return -ENOIOCTLCMD;
}
return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
next:
/* check the next slot in the tree to see if it is a valid item */
nritems = btrfs_header_nritems(path->nodes[0]);
+ path->slots[0]++;
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret)
goto out;
- } else {
- path->slots[0]++;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
struct ceph_mds_reply_info_parsed *info,
int features)
{
- if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
+ u32 op = le32_to_cpu(info->head->op);
+
+ if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else
return parse_reply_info_dir(p, end, info, features);
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+ unsigned long ptr_size;
struct rlimit *rlim;
+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
+
acct_arg_size(bprm, size / PAGE_SIZE);
/*
* to work from.
*/
rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ goto fail;
}
return page;
+
+fail:
+ put_page(page);
+ return NULL;
}
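
A worked example of why the pointer accounting matters: on a 64-bit
machine an execve() with 500,000 argv pointers contributes
ptr_size = 500000 * sizeof(void *) = 4,000,000 bytes to the charged
size, so against the default 8 MiB RLIMIT_STACK the quarter-limit check
above (2 MiB) now fires even if every argument string is a single byte,
whereas the old accounting counted only the string bytes themselves.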
static void put_arg_page(struct page *page)
complete(vfork_done);
}
- if (core_waiters)
+ if (core_waiters > 0) {
+ freezer_do_not_count();
wait_for_completion(&core_state->startup);
+ freezer_count();
+ }
fail:
return core_waiters;
}
#define EXT4_MAX_BLOCK_SIZE 65536
#define EXT4_MIN_BLOCK_LOG_SIZE 10
#define EXT4_MAX_BLOCK_LOG_SIZE 16
+#define EXT4_MAX_CLUSTER_LOG_SIZE 30
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
return ret ? ret : copied;
}
+/*
+ * This is a private version of page_zero_new_buffers() which doesn't
+ * set the buffer to be dirty, since in data=journalled mode we need
+ * to call ext4_handle_dirty_metadata() instead.
+ */
+static void ext4_journalled_zero_new_buffers(handle_t *handle,
+ struct page *page,
+ unsigned from, unsigned to)
+{
+ unsigned int block_start = 0, block_end;
+ struct buffer_head *head, *bh;
+
+ bh = head = page_buffers(page);
+ do {
+ block_end = block_start + bh->b_size;
+ if (buffer_new(bh)) {
+ if (block_end > from && block_start < to) {
+ if (!PageUptodate(page)) {
+ unsigned start, size;
+
+ start = max(from, block_start);
+ size = min(to, block_end) - start;
+
+ zero_user(page, start, size);
+ write_end_fn(handle, bh);
+ }
+ clear_buffer_new(bh);
+ }
+ }
+ block_start = block_end;
+ bh = bh->b_this_page;
+ } while (bh != head);
+}
+
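
The do/while in ext4_journalled_zero_new_buffers() relies on the buffer_heads of a page forming a circular list linked through b_this_page. An illustrative walk of the same shape (a sketch, not part of the patch):

/* Sketch: visit every buffer_head attached to a page exactly once. */
static unsigned int count_page_buffers(struct page *page)
{
	struct buffer_head *head = page_buffers(page), *bh = head;
	unsigned int n = 0;

	do {
		n++;
		bh = bh->b_this_page;	/* last buffer points back at head */
	} while (bh != head);

	return n;
}
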
static int ext4_journalled_write_end(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
BUG_ON(!ext4_handle_valid(handle));
- if (copied < len) {
- if (!PageUptodate(page))
- copied = 0;
- page_zero_new_buffers(page, from+copied, to);
+ if (unlikely(copied < len) && !PageUptodate(page)) {
+ copied = 0;
+ ext4_journalled_zero_new_buffers(handle, page, from, to);
+ } else {
+ if (unlikely(copied < len))
+ ext4_journalled_zero_new_buffers(handle, page,
+ from + copied, to);
+ ret = walk_page_buffers(handle, page_buffers(page), from,
+ from + copied, &partial,
+ write_end_fn);
+ if (!partial)
+ SetPageUptodate(page);
}
-
- ret = walk_page_buffers(handle, page_buffers(page), from,
- to, &partial, write_end_fn);
- if (!partial)
- SetPageUptodate(page);
new_i_size = pos + copied;
if (new_i_size > inode->i_size)
i_size_write(inode, pos+copied);
struct inode *inode;
journal_t *journal = EXT4_SB(sb)->s_journal;
long ret;
+ loff_t size;
int block;
inode = iget_locked(sb, ino);
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
+ if ((size = i_size_read(inode)) < 0) {
+ EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+ ret = -EIO;
+ goto bad_inode;
+ }
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
- if (!ei->i_dtime) {
+ if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+ raw_inode->i_uid_high = 0;
+ raw_inode->i_gid_high = 0;
+ } else {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(inode->i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(inode->i_gid));
- } else {
- raw_inode->i_uid_high = 0;
- raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low =
ext4_grpblk_t min;
ext4_grpblk_t max;
ext4_grpblk_t chunk;
- unsigned short border;
+ unsigned int border;
BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
struct ext4_buddy e4b;
struct sg {
struct ext4_group_info info;
- ext4_grpblk_t counters[16];
+ ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
} sg;
group--;
if (ar->pright && start + size - 1 >= ar->lright)
size -= start + size - ar->lright;
+ /*
+ * Trim allocation request for filesystems with artificially small
+ * groups.
+ */
+ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
+ size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
+
end = start + size;
/* check we don't cross already preallocated blocks */
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
- bh = bh2;
retval = ext4_handle_dirty_metadata(handle, dir, frame->bh);
if (retval)
goto out_frames;
- retval = ext4_handle_dirty_metadata(handle, dir, bh);
+ retval = ext4_handle_dirty_metadata(handle, dir, bh2);
if (retval)
goto out_frames;
- de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+ de = do_split(handle,dir, &bh2, frame, &hinfo, &retval);
if (!de) {
goto out_frames;
}
- dx_release(frames);
- retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
- brelse(bh);
- return retval;
+ retval = add_dirent_to_buf(handle, dentry, inode, de, bh2);
out_frames:
/*
* Even if the block split failed, we have to properly write
* out all the changes we did so far. Otherwise we can end up
* with corrupted filesystem.
*/
- ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+ ext4_mark_inode_dirty(handle, dir);
dx_release(frames);
+ brelse(bh2);
return retval;
}
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ int aborted = 0;
int i, err;
ext4_unregister_li_request(sb);
ext4_commit_super(sb, 1);
if (sbi->s_journal) {
+ aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
- if (err < 0)
+ if ((err < 0) && !aborted)
ext4_abort(sb, "Couldn't clean up the journal");
}
ext4_ext_release(sb);
ext4_xattr_put_super(sb);
- if (!(sb->s_flags & MS_RDONLY)) {
+ if (!(sb->s_flags & MS_RDONLY) && !aborted) {
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
es->s_state = cpu_to_le16(sbi->s_mount_state);
ext4_commit_super(sb, 1);
ext4_set_bit(s++, buf);
count++;
}
- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
- ext4_set_bit(EXT4_B2C(sbi, s++), buf);
- count++;
+ j = ext4_bg_num_gdb(sb, grp);
+ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_error(sb, "Invalid number of block group "
+ "descriptor blocks: %d", j);
+ j = EXT4_BLOCKS_PER_GROUP(sb) - s;
}
+ count += j;
+ for (; j > 0; j--)
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
}
if (!count)
return 0;
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
- struct ext4_sb_info *sbi;
+ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- goto out_free_orig;
+ if ((data && !orig_data) || !sbi)
+ goto out_free_base;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
- if (!sbi->s_blockgroup_lock) {
- kfree(sbi);
- goto out_free_orig;
- }
+ if (!sbi->s_blockgroup_lock)
+ goto out_free_base;
+
sb->s_fs_info = sbi;
sbi->s_mount_opt = 0;
sbi->s_resuid = EXT4_DEF_RESUID;
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
- &journal_devnum, &journal_ioprio, NULL, 0)) {
- ext4_msg(sb, KERN_WARNING,
- "failed to parse options in superblock: %s",
- sbi->s_es->s_mount_opts);
+ if (sbi->s_es->s_mount_opts[0]) {
+ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+ sizeof(sbi->s_es->s_mount_opts),
+ GFP_KERNEL);
+ if (!s_mount_opts)
+ goto failed_mount;
+ if (!parse_options(s_mount_opts, sb, &journal_devnum,
+ &journal_ioprio, NULL, 0)) {
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ s_mount_opts);
+ }
+ kfree(s_mount_opts);
}
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, NULL, 0))
if (blocksize < EXT4_MIN_BLOCK_SIZE ||
blocksize > EXT4_MAX_BLOCK_SIZE) {
ext4_msg(sb, KERN_ERR,
- "Unsupported filesystem blocksize %d", blocksize);
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
+ blocksize, le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+ if (le32_to_cpu(es->s_log_block_size) >
+ (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log block size: %u",
+ le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
- goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+ sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto failed_mount;
+ }
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
}
sbi->s_cluster_ratio = clustersize / blocksize;
- if (sbi->s_inodes_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#inodes per group too big: %lu",
- sbi->s_inodes_per_group);
- goto failed_mount;
- }
-
/*
* Test whether we have more sectors than will fit in sector_t,
* and whether the max offset is addressable by the page cache.
descr = "out journal";
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+ "Opts: %.*s%s%s", descr,
+ (int) sizeof(sbi->s_es->s_mount_opts),
+ sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+out_free_base:
kfree(sbi);
-out_free_orig:
kfree(orig_data);
return ret;
}
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
- if (attr->ia_valid & ATTR_FILE)
- return fuse_do_setattr(entry, attr, attr->ia_file);
- else
- return fuse_do_setattr(entry, attr, NULL);
+ struct inode *inode = entry->d_inode;
+ struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL;
+ int ret;
+
+ if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
+ attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
+ ATTR_MODE);
+ /*
+ * ia_mode calculation may have used stale i_mode. Refresh and
+ * recalculate.
+ */
+ ret = fuse_do_getattr(inode, NULL, file);
+ if (ret)
+ return ret;
+
+ attr->ia_mode = inode->i_mode;
+ if (inode->i_mode & S_ISUID) {
+ attr->ia_valid |= ATTR_MODE;
+ attr->ia_mode &= ~S_ISUID;
+ }
+ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+ attr->ia_valid |= ATTR_MODE;
+ attr->ia_mode &= ~S_ISGID;
+ }
+ }
+ if (!attr->ia_valid)
+ return 0;
+
+ ret = fuse_do_setattr(entry, attr, file);
+ if (!ret) {
+ /* Directory mode changed, may need to revalidate access */
+ if (S_ISDIR(entry->d_inode->i_mode) &&
+ (attr->ia_valid & ATTR_MODE))
+ fuse_invalidate_entry_cache(entry);
+ }
+ return ret;
}
static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
return ret;
}
+static int fuse_verify_xattr_list(char *list, size_t size)
+{
+ size_t origsize = size;
+
+ while (size) {
+ size_t thislen = strnlen(list, size);
+
+ if (!thislen || thislen == size)
+ return -EIO;
+
+ size -= thislen + 1;
+ list += thislen + 1;
+ }
+
+ return origsize;
+}
+
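
fuse_verify_xattr_list() above enforces the standard listxattr(2) encoding: consecutive NUL-terminated names that exactly fill the buffer, rejecting an empty name (thislen == 0) or a name truncated at the end of the buffer (thislen == size). An illustrative buffer (not from the patch):

/* Accepted: two names, each NUL-terminated, filling the buffer. */
static const char good[] = "user.a\0security.selinux";
/* sizeof(good) == 24: strnlen() yields 6, then 16, consuming it all.
 * A buffer whose last name lacks the trailing NUL fails with -EIO. */
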
static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
{
struct inode *inode = entry->d_inode;
}
fuse_request_send(fc, req);
ret = req->out.h.error;
- if (!ret)
+ if (!ret) {
ret = size ? req->out.args[0].size : outarg.size;
- else {
+ if (ret > 0 && size)
+ ret = fuse_verify_xattr_list(list, ret);
+ } else {
if (ret == -ENOSYS) {
fc->no_listxattr = 1;
ret = -EOPNOTSUPP;
struct fuse_req *req = ff->reserved_req;
if (sync) {
+ req->force = 1;
fuse_request_send(ff->fc, req);
path_put(&req->misc.release.path);
fuse_put_request(ff->fc, req);
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start)
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
addr = ALIGN(vma->vm_end, huge_page_size(h));
}
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+ task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
+ task_unlock(p);
out:
return ret;
}
pri_bh = NULL;
root_found:
+ /* We don't support read-write mounts */
+ if (!(s->s_flags & MS_RDONLY)) {
+ error = -EACCES;
+ goto out_freebh;
+ }
if (joliet_level && (pri == NULL || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- /* We don't support read-write mounts */
- if (!(flags & MS_RDONLY))
- return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
__blist_del_buffer(list, jh);
jh->b_jlist = BJ_None;
- if (test_clear_buffer_jbddirty(bh))
+ if (transaction && is_journal_aborted(transaction->t_journal))
+ clear_buffer_jbddirty(bh);
+ else if (test_clear_buffer_jbddirty(bh))
mark_buffer_dirty(bh); /* Expose it to the VM */
}
*/
if (!PageUptodate(page)) {
unsigned pglen = nfs_page_length(page);
- unsigned end = offset + len;
+ unsigned end = offset + copied;
if (pglen == 0) {
zero_user_segments(page, 0, offset,
&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
-/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
- * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
+/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
+ * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
* the stack.
*/
-#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
static int buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages, unsigned int *pgbase)
spages = pages;
do {
- len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+ len = min_t(size_t, PAGE_SIZE, buflen);
newpage = alloc_page(GFP_KERNEL);
if (newpage == NULL)
*/
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
- struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.rpc_argp = &args,
.rpc_resp = &res,
};
- int ret = -ENOMEM, npages, i;
+ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
+ int ret = -ENOMEM, i;
size_t acl_len = 0;
- npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
- /* As long as we're doing a round trip to the server anyway,
- * let's be prepared for a page of acl data. */
- if (npages == 0)
- npages = 1;
-
- /* Add an extra page to handle the bitmap returned */
- npages++;
+ if (npages > ARRAY_SIZE(pages))
+ return -ERANGE;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
.rpc_argp = &arg,
.rpc_resp = &res,
};
+ unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
int ret, i;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
+ if (npages > ARRAY_SIZE(pages))
+ return -ERANGE;
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
if (i < 0)
return i;
__func__, status);
case -ENOENT:
case -ENOMEM:
+ case -EACCES:
+ case -EROFS:
+ case -EIO:
case -ESTALE:
/*
* Open state on this file cannot be recovered
open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
+#define decode_space_limit_maxsz (3)
#define decode_ace_maxsz (3 + nfs4_owner_maxsz)
#define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \
+ decode_space_limit_maxsz + \
decode_ace_maxsz)
#define decode_change_info_maxsz (5)
#define decode_open_maxsz (op_decode_hdr_maxsz + \
{
unsigned int len, v, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
+ struct kvec *head = rqstp->rq_arg.head;
if (!(p = decode_fh(p, &args->fh)))
return 0;
args->count = ntohl(*p++);
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
+ if ((void *)p > head->iov_base + head->iov_len)
+ return 0;
/*
* The count must equal the amount of data passed.
*/
* Check to make sure that we got the right number of
* bytes.
*/
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
- - hdr;
+ hdr = (void*)p - head->iov_base;
+ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
len = args->len = max_blocksize;
}
rqstp->rq_vec[0].iov_base = (void*)p;
- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
/* first copy and check from the first page */
old = (char*)p;
vec = &rqstp->rq_arg.head[0];
+ if ((void *)old > vec->iov_base + vec->iov_len)
+ return 0;
avail = vec->iov_len - (old - (char*)vec->iov_base);
while (len && avail && *old) {
*new++ = *old++;
return nfserr;
}
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page. The xdr code has taken
+ * advantage of this assumption to be sloppy about bounds checking in
+ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+ struct svc_procedure *proc)
+{
+ /*
+ * The ACL code has more careful bounds-checking and is not
+ * susceptible to this problem:
+ */
+ if (rqstp->rq_prog != NFS_PROGRAM)
+ return false;
+ /*
+ * Ditto NFSv4 (which can in theory have argument and reply both
+ * more than a page):
+ */
+ if (rqstp->rq_vers >= 4)
+ return false;
+ /* The reply will be small, we're OK: */
+ if (proc->pc_xdrressize > 0 &&
+ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+ return false;
+
+ return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
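
The reply-size bound above is expressed in XDR words; XDR_QUADLEN() is defined in include/linux/sunrpc/xdr.h as a round-up conversion from bytes:

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* bytes -> 4-byte XDR words */
/* e.g. with 4096-byte pages, XDR_QUADLEN(PAGE_SIZE) == 1024 words */
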
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
rqstp->rq_vers, rqstp->rq_proc);
proc = rqstp->rq_procinfo;
+ if (nfs_request_too_big(rqstp, proc)) {
+ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+ *statp = rpc_garbage_args;
+ return 1;
+ }
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
struct nfsd_writeargs *args)
{
unsigned int len, hdr, dlen;
+ struct kvec *head = rqstp->rq_arg.head;
int v;
if (!(p = decode_fh(p, &args->fh)))
* Check to make sure that we got the right number of
* bytes.
*/
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
- - hdr;
+ hdr = (void*)p - head->iov_base;
+ if (hdr > head->iov_len)
+ return 0;
+ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
return 0;
rqstp->rq_vec[0].iov_base = (void*)p;
- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
__be32 err;
int host_err;
bool get_write_count;
- int size_change = 0;
+ bool size_change = (iap->ia_valid & ATTR_SIZE);
if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
/* Get inode */
err = fh_verify(rqstp, fhp, ftype, accmode);
if (err)
- goto out;
+ return err;
if (get_write_count) {
host_err = fh_want_write(fhp);
if (host_err)
- return nfserrno(host_err);
+ goto out;
}
dentry = fhp->fh_dentry;
iap->ia_valid &= ~ATTR_MODE;
if (!iap->ia_valid)
- goto out;
+ return 0;
nfsd_sanitize_attrs(dentry, iap);
+ if (check_guard && guardtime != inode->i_ctime.tv_sec)
+ return nfserr_notsync;
+
/*
* The size case is special, it changes the file in addition to the
- * attributes.
+ * attributes, and file systems don't expect it to be mixed with
+ * "random" attribute changes. We thus split out the size change
+ * into a separate call to ->setattr, and do the rest as a separate
+ * setattr call.
*/
- if (iap->ia_valid & ATTR_SIZE) {
+ if (size_change) {
err = nfsd_get_write_access(rqstp, fhp, iap);
if (err)
- goto out;
- size_change = 1;
- }
-
- iap->ia_valid |= ATTR_CTIME;
-
- if (check_guard && guardtime != inode->i_ctime.tv_sec) {
- err = nfserr_notsync;
- goto out_put_write_access;
+ return err;
}
host_err = nfsd_break_lease(inode);
goto out_put_write_access_nfserror;
fh_lock(fhp);
+ if (size_change) {
+ /*
+ * RFC5661, Section 18.30.4:
+ * Changing the size of a file with SETATTR indirectly
+ * changes the time_modify and change attributes.
+ *
+ * (and similar for the older RFCs)
+ */
+ struct iattr size_attr = {
+ .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
+ .ia_size = iap->ia_size,
+ };
+
+ host_err = notify_change(dentry, &size_attr);
+ if (host_err)
+ goto out_unlock;
+ iap->ia_valid &= ~ATTR_SIZE;
+
+ /*
+ * Avoid the additional setattr call below if the only other
+ * attribute that the client sends is the mtime, as we update
+ * it as part of the size change above.
+ */
+ if ((iap->ia_valid & ~ATTR_MTIME) == 0)
+ goto out_unlock;
+ }
+
+ iap->ia_valid |= ATTR_CTIME;
host_err = notify_change(dentry, iap);
- fh_unlock(fhp);
+out_unlock:
+ fh_unlock(fhp);
out_put_write_access_nfserror:
- err = nfserrno(host_err);
-out_put_write_access:
if (size_change)
put_write_access(inode);
- if (!err)
- commit_metadata(fhp);
out:
- return err;
+ if (!host_err)
+ commit_metadata(fhp);
+ return nfserrno(host_err);
}
#if defined(CONFIG_NFSD_V2_ACL) || \
*/
void fsnotify_unmount_inodes(struct list_head *list)
{
- struct inode *inode, *next_i, *need_iput = NULL;
+ struct inode *inode, *iput_inode = NULL;
spin_lock(&inode_sb_list_lock);
- list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
- struct inode *need_iput_tmp;
-
+ list_for_each_entry(inode, list, i_sb_list) {
/*
* We cannot __iget() an inode in state I_FREEING,
* I_WILL_FREE, or I_NEW which is fine because by that point
continue;
}
- need_iput_tmp = need_iput;
- need_iput = NULL;
-
- /* In case fsnotify_inode_delete() drops a reference. */
- if (inode != need_iput_tmp)
- __iget(inode);
- else
- need_iput_tmp = NULL;
+ __iget(inode);
spin_unlock(&inode->i_lock);
-
- /* In case the dropping of a reference would nuke next_i. */
- while (&next_i->i_sb_list != list) {
- spin_lock(&next_i->i_lock);
- if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
- atomic_read(&next_i->i_count)) {
- __iget(next_i);
- need_iput = next_i;
- spin_unlock(&next_i->i_lock);
- break;
- }
- spin_unlock(&next_i->i_lock);
- next_i = list_entry(next_i->i_sb_list.next,
- struct inode, i_sb_list);
- }
-
- /*
- * We can safely drop inode_sb_list_lock here because either
- * we actually hold references on both inode and next_i or
- * end of list. Also no new inodes will be added since the
- * umount has begun.
- */
spin_unlock(&inode_sb_list_lock);
- if (need_iput_tmp)
- iput(need_iput_tmp);
+ if (iput_inode)
+ iput(iput_inode);
/* for each watch, send FS_UNMOUNT and then remove it */
fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
fsnotify_inode_delete(inode);
- iput(inode);
+ iput_inode = inode;
spin_lock(&inode_sb_list_lock);
}
spin_unlock(&inode_sb_list_lock);
+
+ if (iput_inode)
+ iput(iput_inode);
}
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
+ /*
+ * On DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
+ * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
+ * we can recover correctly from node failure. Otherwise, we may get
+ * an invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set.
+ */
+ if (!ocfs2_is_o2cb_active() &&
+ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lvb = 1;
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
*/
static struct ocfs2_stack_plugin *active_stack;
+inline int ocfs2_is_o2cb_active(void)
+{
+ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
#endif /* STACKGLUE_H */
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
- if (stack_guard_page_start(vma, start))
- start += PAGE_SIZE;
end = vma->vm_end;
- if (stack_guard_page_end(vma, end))
- end -= PAGE_SIZE;
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
start,
static int reiserfs_quota_on_mount(struct super_block *, int);
#endif
-/* look for uncompleted unlinks and truncates and complete them */
+/*
+ * Look for uncompleted unlinks and truncates and complete them
+ *
+ * Called with the superblock write locked. If quotas are enabled, we have to
+ * release/retake the write lock, lest we call dquot_quota_on_mount(), proceed
+ * to schedule_on_each_cpu() in invalidate_bdev(), and deadlock waiting for the
+ * per-cpu worklets to complete flush_async_commits(), which in turn waits for
+ * the superblock write lock.
+ */
static int finish_unfinished(struct super_block *s)
{
INITIALIZE_PATH(path);
quota_enabled[i] = 0;
continue;
}
+ reiserfs_write_unlock(s);
ret = reiserfs_quota_on_mount(s, i);
+ reiserfs_write_lock(s);
if (ret < 0)
reiserfs_warning(s, "reiserfs-2500",
"cannot turn on journaled "
buf->len = spd->partial[page_nr].len;
buf->private = spd->partial[page_nr].private;
buf->ops = spd->ops;
+ buf->flags = 0;
if (spd->flags & SPLICE_F_GIFT)
buf->flags |= PIPE_BUF_FLAG_GIFT;
*/
static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
- int err, over = 0;
+ int err = 0, over = 0;
loff_t pos = file->f_pos;
struct qstr nm;
union ubifs_key key;
}
out:
- if (err != -ENOENT) {
+ if (err != -ENOENT)
ubifs_err("cannot find next direntry, error %d", err);
- return err;
- }
+ else
+ /*
+ * -ENOENT is a non-fatal error in this context; the TNC uses
+ * it to indicate that the cursor moved past the current directory,
+ * so readdir() has to stop.
+ */
+ err = 0;
+
kfree(file->private_data);
file->private_data = NULL;
/* 2 is a special value indicating that there are no more direntries */
file->f_pos = 2;
- return 0;
+ return err;
}
static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
#include <linux/slab.h>
#include "ubifs.h"
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+ int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_zbranch *zbr, void *node);
+
/*
* Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
* @NAME_LESS: name corresponding to the first argument is less than second
return 0;
}
- err = ubifs_tnc_read_node(c, zbr, node);
+ if (c->replaying) {
+ err = fallible_read_node(c, &zbr->key, zbr, node);
+ /*
+ * When the node was not found, return -ENOENT, 0 otherwise.
+ * Negative return codes stay as-is.
+ */
+ if (err == 0)
+ err = -ENOENT;
+ else if (err == 1)
+ err = 0;
+ } else {
+ err = ubifs_tnc_read_node(c, zbr, node);
+ }
if (err)
return err;
if (nm->name) {
if (err) {
/* Handle collisions */
- err = resolve_collision(c, key, &znode, &n, nm);
+ if (c->replaying)
+ err = fallible_resolve_collision(c, key, &znode, &n,
+ nm, 0);
+ else
+ err = resolve_collision(c, key, &znode, &n, nm);
dbg_tnc("rc returned %d, znode %p, n %d",
err, znode, n);
if (unlikely(err < 0))
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_names -= nm->len;
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_names += nm->len;
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
int error = 0;
int aforkblks = 0;
int taforkblks = 0;
+ xfs_extnum_t nextents;
__uint64_t tmp;
tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
* pointer. Otherwise it's already NULL or
* pointing to the extent.
*/
- if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+ nextents = ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ if (nextents <= XFS_INLINE_EXTS) {
ifp->if_u1.if_extents =
ifp->if_u2.if_inline_ext;
}
* pointer. Otherwise it's already NULL or
* pointing to the extent.
*/
- if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+ nextents = tip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ if (nextents <= XFS_INLINE_EXTS) {
tifp->if_u1.if_extents =
tifp->if_u2.if_inline_ext;
}
return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
}
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+ return req->base.flags;
+}
+
static inline struct crypto_ahash *crypto_spawn_ahash(
struct crypto_ahash_spawn *spawn)
{
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* CAN_H */
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);
-extern int can_rx_register(struct net_device *dev, canid_t can_id,
- canid_t mask,
- void (*func)(struct sk_buff *, void *),
- void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data, char *ident, struct sock *sk);
extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
canid_t mask,
{ .notifier_call = fn, .priority = pri }; \
register_cpu_notifier(&fn##_nb); \
}
-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
-#else
-#ifndef MODULE
-extern int register_cpu_notifier(struct notifier_block *nb);
-#else
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
}
-#endif
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
}
-extern int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+ return sk_filter_trim_cap(sk, skb, 1);
+}
extern unsigned int sk_run_filter(const struct sk_buff *skb,
const struct sock_filter *filter);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
+/**
+ * list_first_entry_or_null - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
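
A brief usage sketch: the macro replaces the easy-to-botch list_empty()-then-list_first_entry() sequence (the queue and item names here are hypothetical):

struct item *it;

spin_lock(&q->lock);
it = list_first_entry_or_null(&q->head, struct item, node);
if (it)
	list_del(&it->node);	/* safe: non-NULL means the list was non-empty */
spin_unlock(&q->lock);
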
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
- return fl1->fl_pid == fl2->fl_pid
+ return fl1->fl_file->f_dentry->d_inode == fl2->fl_file->f_dentry->d_inode
+ && fl1->fl_pid == fl2->fl_pid
&& fl1->fl_owner == fl2->fl_owner
&& fl1->fl_start == fl2->fl_start
&& fl1->fl_end == fl2->fl_end
#define lockdep_depth(tsk) (0)
-#define lockdep_assert_held(l) do { } while (0)
+#define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#endif /* !LOCKDEP */
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSUP) &&
- (vma->vm_end == addr) &&
- !vma_growsup(vma->vm_next, addr);
-}
-
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
struct address_space *mapping,
struct file *filp);
+extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
return vma;
}
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_start = vma->vm_start;
+
+ if (vma->vm_flags & VM_GROWSDOWN) {
+ vm_start -= stack_guard_gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
+ }
+ return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+ unsigned long vm_end = vma->vm_end;
+
+ if (vma->vm_flags & VM_GROWSUP) {
+ vm_end += stack_guard_gap;
+ if (vm_end < vma->vm_end)
+ vm_end = -PAGE_SIZE;
+ }
+ return vm_end;
+}
+
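
Both helpers guard against unsigned wrap-around: if the gap is larger than the distance to the address-space boundary, the adjusted value would wrap, which is detected by comparing against the starting point. Spelled out for the GROWSDOWN case (values illustrative):

/* With vm_start = 0x1000 and a 1 MiB stack_guard_gap (0x100000),
 * 0x1000 - 0x100000 wraps to a huge unsigned value > 0x1000,
 * so the result is clamped to 0 rather than used as a bogus gap. */
unsigned long start = vma->vm_start - stack_guard_gap;
if (start > vma->vm_start)
	start = 0;
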
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
if (!pskb_may_pull(skb, hlen))
return NULL;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
skb_headroom(skb) + len <= skb->hdr_len;
}
+static inline int skb_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+{
+ return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
+
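
skb_try_make_writable() returns nonzero only when the skb shares its data and a private copy could not be allocated; a sketch of the expected caller pattern (the netfilter-style disposition is illustrative):

/* Ensure the first write_len bytes may be modified in place. */
if (skb_try_make_writable(skb, sizeof(struct iphdr)))
	return NF_DROP;		/* shared data and the unshare failed */
ip_hdr(skb)->ttl--;		/* now safe; real code also fixes the checksum */
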
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
/* device generates spurious wakeup, ignore remote wakeup capability */
#define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+
#endif /* __LINUX_USB_QUIRKS_H */
}
for (opt_iter = 6; opt_iter < opt_len;) {
+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto out;
+ }
tag_len = opt[opt_iter + 1];
if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
err_offset = opt_iter + 1;
* Functions for memory accounting
*/
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+void __sk_mem_reclaim(struct sock *sk, int amount);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+
+ /* Avoid a possible overflow.
+ * TCP send queues can make this happen, if sk_mem_reclaim()
+ * is not called and more than 2 GBytes are released at once.
+ *
+ * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
+ * no need to hold that much forward allocation anyway.
+ */
+ if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ __sk_mem_reclaim(sk, 1 << 20);
}
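
For scale: sk_forward_alloc counts bytes, so the trigger above fires once 1 << 21 = 2 MiB of forward allocation has accumulated, and __sk_mem_reclaim(sk, 1 << 20) returns 1 MiB of it to the memory accounting, leaving a 1 MiB cushion for subsequent charges.
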
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
+void sock_efree(struct sk_buff *skb);
extern int sock_setsockopt(struct socket *sock, int level,
int op, char __user *optval,
sk_free(sk);
}
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
- const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+ unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested)
+{
+ return __sk_receive_skb(sk, skb, nested, 1);
+}
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
return 1;
}
+int tcp_filter(struct sock *sk, struct sk_buff *skb);
#undef STATE_TRACE
union ib_gid sgid;
__be16 dlid;
__be16 slid;
- int raw_traffic;
+ u8 raw_traffic;
/* reserved */
__be32 flow_label;
u8 hop_limit;
u8 traffic_class;
- int reversible;
+ u8 reversible;
u8 numb_path;
__be16 pkey;
__be16 qos_class;
u8 hop_limit;
u8 scope;
u8 join_state;
- int proxy_join;
+ u8 proxy_join;
};
/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
goto out;
else if ((addr = (ulong)shmaddr)) {
if (addr & (SHMLBA-1)) {
- if (shmflg & SHM_RND)
- addr &= ~(SHMLBA-1); /* round down */
+ /*
+ * Round down to the nearest multiple of shmlba.
+ * For sane do_mmap_pgoff() parameters, avoid
+ * round downs that trigger nil-page and MAP_FIXED.
+ */
+ if ((shmflg & SHM_RND) && addr >= SHMLBA)
+ addr &= ~(SHMLBA - 1);
else
#ifndef __ARCH_FORCE_SHMLBA
if (addr & ~PAGE_MASK)
return __cpu_notify(val, v, -1, NULL);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void cpu_notify_nofail(unsigned long val, void *v)
-{
- BUG_ON(cpu_notify(val, v));
-}
EXPORT_SYMBOL(register_cpu_notifier);
void __ref unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_cpu_notifier);
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+ BUG_ON(cpu_notify(val, v));
+}
+
static inline void check_for_tasks(int cpu)
{
struct task_struct *p;
}
}
+/*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group we
+ * need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ * perf_remove_from_context();
+ * synchronize_rcu();
+ * perf_install_in_context();
+ *
+ * to effect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However; because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ * task_struct::perf_event_mutex
+ * perf_event_context::mutex
+ * perf_event_context::lock
+ * perf_event::child_mutex;
+ * perf_event::mmap_mutex
+ * mmap_sem
+ */
+static struct perf_event_context *perf_event_ctx_lock(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+again:
+ rcu_read_lock();
+ ctx = ACCESS_ONCE(event->ctx);
+ if (!atomic_inc_not_zero(&ctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock(&ctx->mutex);
+ if (event->ctx != ctx) {
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+ goto again;
+ }
+
+ return ctx;
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+ struct perf_event_context *ctx)
+{
+ mutex_unlock(&ctx->mutex);
+ put_ctx(ctx);
+}
+
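
perf_event_ctx_lock()/perf_event_ctx_unlock() above follow a general pattern: take a speculative reference under RCU with atomic_inc_not_zero(), sleep on the mutex, then re-check that the pointer did not move while sleeping. A stripped-down sketch of the idiom, with hypothetical obj/owner types:

struct obj *lock_current_obj(struct owner *o)
{
	struct obj *x;
again:
	rcu_read_lock();
	x = ACCESS_ONCE(o->obj);
	if (!atomic_inc_not_zero(&x->refcount)) {
		rcu_read_unlock();		/* object already dying: retry */
		goto again;
	}
	rcu_read_unlock();

	mutex_lock(&x->mutex);
	if (o->obj != x) {			/* pointer switched while we slept */
		mutex_unlock(&x->mutex);
		put_obj(x);			/* hypothetical refcount drop */
		goto again;
	}
	return x;				/* locked and still current */
}
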
static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
raw_spin_unlock_irq(&ctx->lock);
}
+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_disable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
+
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
raw_spin_unlock_irq(&ctx->lock);
}
-int perf_event_refresh(struct perf_event *event, int refresh)
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+ struct perf_event_context *ctx;
+
+ ctx = perf_event_ctx_lock(event);
+ _perf_event_enable(event);
+ perf_event_ctx_unlock(event, ctx);
+}
+
+static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
return -EINVAL;
atomic_add(refresh, &event->event_limit);
- perf_event_enable(event);
+ _perf_event_enable(event);
return 0;
}
+
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_event_refresh(event, refresh);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
rcu_read_unlock();
if (owner) {
- mutex_lock(&owner->perf_event_mutex);
+ /*
+ * If we're here through perf_event_exit_task() we're already
+ * holding ctx->mutex which would be an inversion wrt. the
+ * normal lock order.
+ *
+ * However, we can safely take this lock because it's the child
+ * ctx->mutex.
+ */
+ mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
- int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
- u64 values[5];
+ int n = 0, size = 0, ret;
u64 count, enabled, running;
+ u64 values[5];
+
+ lockdep_assert_held(&ctx->mutex);
- mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);
values[n++] = 1 + leader->nr_siblings;
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
- goto unlock;
+ return -EFAULT;
ret = size;
size = n * sizeof(u64);
if (copy_to_user(buf + ret, values, size)) {
- ret = -EFAULT;
- goto unlock;
+ return -EFAULT;
}
ret += size;
}
-unlock:
- mutex_unlock(&ctx->mutex);
return ret;
}
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ int ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = perf_read_hw(event, buf, count);
+ perf_event_ctx_unlock(event, ctx);
- return perf_read_hw(event, buf, count);
+ return ret;
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
return events;
}
-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
+
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
- WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
+ lockdep_assert_held(&ctx->mutex);
+
event = event->group_leader;
perf_event_for_each_child(event, func);
func(event);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
- perf_event_for_each_child(event, func);
- mutex_unlock(&ctx->mutex);
+ perf_event_for_each_child(sibling, func);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
- struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
- func = perf_event_enable;
+ func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
- func = perf_event_disable;
+ func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
- func = perf_event_reset;
+ func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
- return perf_event_refresh(event, arg);
+ return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
return 0;
}
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct perf_event *event = file->private_data;
+ struct perf_event_context *ctx;
+ long ret;
+
+ ctx = perf_event_ctx_lock(event);
+ ret = _perf_ioctl(event, cmd, arg);
+ perf_event_ctx_unlock(event, ctx);
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
int perf_event_task_enable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
- list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_enable);
+ list_for_each_entry(event, ¤t->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_enable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(¤t->perf_event_mutex);
return 0;
int perf_event_task_disable(void)
{
+ struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
- list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
- perf_event_for_each_child(event, perf_event_disable);
+ list_for_each_entry(event, ¤t->perf_event_list, owner_entry) {
+ ctx = perf_event_ctx_lock(event);
+ perf_event_for_each_child(event, _perf_event_disable);
+ perf_event_ctx_unlock(event, ctx);
+ }
mutex_unlock(¤t->perf_event_mutex);
return 0;
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
-
- /* Keeps track of cpu being initialized/exited */
- bool online;
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
- if (!head) {
- /*
- * We can race with cpu hotplug code. Do not
- * WARN if the cpu just got unplugged.
- */
- WARN_ON_ONCE(swhash->online);
+ if (WARN_ON_ONCE(!head))
return -EINVAL;
- }
hlist_add_head_rcu(&event->hlist_entry, head);
int err = 0;
mutex_lock(&swhash->hlist_mutex);
-
if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
struct swevent_hlist *hlist;
return ret;
}
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+ if (b < a)
+ swap(a, b);
+
+ mutex_lock(a);
+ mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
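
Acquiring the two mutexes in address order gives every caller the same global ordering, the standard way to avoid an ABBA deadlock when two objects of the same class must be held at once; the unlock order does not matter:

/* Sketch: pairing mutex_lock_double() with plain unlocks. */
mutex_lock_double(&a->mutex, &b->mutex);
/* ... operate on both a and b ... */
mutex_unlock(&b->mutex);
mutex_unlock(&a->mutex);
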
+/*
+ * Variation on perf_event_ctx_lock(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+ struct perf_event_context *ctx)
+{
+ struct perf_event_context *gctx;
+
+again:
+ rcu_read_lock();
+ gctx = ACCESS_ONCE(group_leader->ctx);
+ if (!atomic_inc_not_zero(&gctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ if (group_leader->ctx != gctx) {
+ mutex_unlock(&ctx->mutex);
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ goto again;
+ }
+
+ return gctx;
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx, *uninitialized_var(gctx);
struct file *event_file = NULL;
struct file *group_file = NULL;
struct task_struct *task = NULL;
}
if (move_group) {
- struct perf_event_context *gctx = group_leader->ctx;
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
- mutex_lock(&gctx->mutex);
+ /*
+ * Check if we raced against another sys_perf_event_open() call
+ * moving the software group underneath us.
+ */
+ if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+ /*
+ * If someone moved the group out from under us, check
+ * if this new event wound up on the same ctx, if so
+ * its the regular !move_group case, otherwise fail.
+ */
+ if (gctx != ctx) {
+ err = -EINVAL;
+ goto err_locked;
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
+ }
+ }
+
+ /*
+ * See perf_event_ctx_lock() for comments on the details
+ * of swizzling perf_event::ctx.
+ */
perf_remove_from_context(group_leader, false);
/*
perf_event__state_init(sibling);
put_ctx(gctx);
}
- mutex_unlock(&gctx->mutex);
- put_ctx(gctx);
+ } else {
+ mutex_lock(&ctx->mutex);
}
WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
if (move_group) {
+ /*
+ * Wait for everybody to stop referencing the events through
+ * the old lists, before installing it on new lists.
+ */
+ synchronize_rcu();
+
perf_install_in_context(ctx, group_leader, cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
+
+ if (move_group) {
+ perf_event_ctx_unlock(group_leader, gctx);
+ put_ctx(gctx);
+ }
mutex_unlock(&ctx->mutex);
event->owner = current;
fd_install(event_fd, event_file);
return event_fd;
+err_locked:
+ if (move_group)
+ perf_event_ctx_unlock(group_leader, gctx);
+ mutex_unlock(&ctx->mutex);
+ fput(event_file);
err_context:
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
- free_event(event);
+ /*
+ * If event_file is set, the fput() above will have called ->release()
+ * and that will take care of freeing the event.
+ */
+ if (!event_file)
+ free_event(event);
err_task:
if (task)
put_task_struct(task);
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}
/*
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
- swhash->online = true;
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
static void perf_event_exit_cpu(int cpu)
{
- struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
perf_event_exit_cpu_context(cpu);
-
- mutex_lock(&swhash->hlist_mutex);
- swhash->online = false;
- swevent_hlist_release(swhash);
- mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
{
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
- struct rt_mutex *pi_mutex = NULL;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+ rt_mutex_unlock(&q.pi_state->pi_mutex);
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
spin_unlock(q.lock_ptr);
}
} else {
+ struct rt_mutex *pi_mutex;
+
/*
* We have been woken up by futex_unlock_pi(), a timeout, or a
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
if (res)
ret = (res < 0) ? res : 0;
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle
+ * the fault, unlock the rt_mutex and return the fault to
+ * userspace.
+ */
+ if (ret && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+
/* Unqueue and drop the lock. */
unqueue_me_pi(&q);
}
- /*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock the rt_mutex and return the fault to userspace.
- */
- if (ret == -EFAULT) {
- if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
- rt_mutex_unlock(pi_mutex);
- } else if (ret == -EINTR) {
+ if (ret == -EINTR) {
/*
* We've already been requeued, but cannot restart by calling
* futex_lock_pi() directly. We could restart this syscall, but
return 0;
}
-__initcall(futex_init);
+core_initcall(futex_init);
WARN_ON(!task->ptrace || task->parent != current);
+ /*
+ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+ * Recheck state under the lock to close this race.
+ */
spin_lock_irq(&task->sighand->siglock);
- if (__fatal_signal_pending(task))
- wake_up_state(task, __TASK_TRACED);
- else
- task->state = TASK_TRACED;
+ if (task->state == __TASK_TRACED) {
+ if (__fatal_signal_pending(task))
+ wake_up_state(task, __TASK_TRACED);
+ else
+ task->state = TASK_TRACED;
+ }
spin_unlock_irq(&task->sighand->siglock);
}
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
- if (!rt_mutex_has_waiters(lock))
- clear_rt_mutex_waiters(lock);
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+ if (rt_mutex_has_waiters(lock))
+ return;
+
+ /*
+ * The rbtree has no waiters enqueued, now make sure that the
+ * lock->owner still has the waiters bit set, otherwise the
+ * following can happen:
+ *
+ * CPU 0                    CPU 1                        CPU2
+ * l->owner=T1
+ *                          rt_mutex_lock(l)
+ *                          lock(l->lock)
+ *                          l->owner = T1 | HAS_WAITERS;
+ *                          enqueue(T2)
+ *                          boost()
+ *                            unlock(l->lock)
+ *                          block()
+ *
+ *                                                       rt_mutex_lock(l)
+ *                                                       lock(l->lock)
+ *                                                       l->owner = T1 | HAS_WAITERS;
+ *                                                       enqueue(T3)
+ *                                                       boost()
+ *                                                         unlock(l->lock)
+ *                                                       block()
+ *          signal(->T2)    signal(->T3)
+ *                          lock(l->lock)
+ *                          dequeue(T2)
+ *                          deboost()
+ *                            unlock(l->lock)
+ *                                                       lock(l->lock)
+ *                                                       dequeue(T3)
+ *                                                        ==> wait list is empty
+ *                                                       deboost()
+ *                                                        unlock(l->lock)
+ *                          lock(l->lock)
+ *                          fixup_rt_mutex_waiters()
+ *                            if (wait_list_empty(l) {
+ *                              l->owner = owner
+ *                              owner = l->owner & ~HAS_WAITERS;
+ *                                ==> l->owner = T1
+ *                            }
+ *                                                       lock(l->lock)
+ * rt_mutex_unlock(l)                                    fixup_rt_mutex_waiters()
+ *                                                         if (wait_list_empty(l) {
+ *                                                           owner = l->owner & ~HAS_WAITERS;
+ * cmpxchg(l->owner, T1, NULL)
+ *  ===> Success (l->owner = NULL)
+ *
+ *                                                           l->owner = owner
+ *                                                             ==> l->owner = T1
+ *                                                         }
+ *
+ * With the check for the waiter bit in place T3 on CPU2 will not
+ * overwrite. All tasks fiddling with the waiters bit are
+ * serialized by l->lock, so nothing else can modify the waiters
+ * bit. If the bit is set then nothing can change l->owner either
+ * so the simple RMW is safe. The cmpxchg() will simply fail if it
+ * happens in the middle of the RMW because the waiters bit is
+ * still set.
+ */
+ owner = ACCESS_ONCE(*p);
+ if (owner & RT_MUTEX_HAS_WAITERS)
+ ACCESS_ONCE(*p) = owner & ~RT_MUTEX_HAS_WAITERS;
}
/*
struct rq *this_rq = this_rq();
/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update))
return;
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *reader;
+ struct buffer_page *head_page;
+ struct buffer_page *commit_page;
+ unsigned commit;
cpu_buffer = iter->cpu_buffer;
- return iter->head_page == cpu_buffer->commit_page &&
- iter->head == rb_commit_index(cpu_buffer);
+ /* Remember, trace recording is off when iterator is in use */
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+ commit_page = cpu_buffer->commit_page;
+ commit = rb_page_commit(commit_page);
+
+ return ((iter->head_page == commit_page && iter->head == commit) ||
+ (iter->head_page == reader && commit_page == head_page &&
+ head_page->read == commit &&
+ iter->head == rb_page_commit(cpu_buffer->reader_page)));
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
arch_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
- strcpy(comm, saved_cmdlines[map]);
+ strlcpy(comm, saved_cmdlines[map], TASK_COMM_LEN);
else
strcpy(comm, "<...>");
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit = 0, end_bit, remain;
+ int nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
if (size > atomic_read(&chunk->avail))
continue;
+ start_bit = 0;
end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
- if (WARN_ON_ONCE((int) size < 0))
+ if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
cond_resched();
find_page:
+ if (fatal_signal_pending(current)) {
+ error = -EINTR;
+ goto out;
+ }
+
page = find_get_page(mapping, index);
if (!page) {
page_cache_sync_readahead(mapping,
return ret;
}
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
+ unsigned int flags)
+{
+ return pmd_write(pmd) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+ page && PageAnon(page));
+}
+
struct page *follow_trans_huge_pmd(struct mm_struct *mm,
unsigned long addr,
pmd_t *pmd,
assert_spin_locked(&mm->page_table_lock);
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
- goto out;
-
page = pmd_page(*pmd);
VM_BUG_ON(!PageHead(page));
+
+ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, page, flags))
+ return NULL;
+
if (flags & FOLL_TOUCH) {
pmd_t _pmd;
/*
return page;
}
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
- return stack_guard_page_start(vma, addr) ||
- stack_guard_page_end(vma, addr+PAGE_SIZE);
-}
-
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
int ret;
unsigned int fault_flags = 0;
- /* For mlock, just skip the stack guard page. */
- if (foll_flags & FOLL_MLOCK) {
- if (stack_guard_page(vma, start))
- goto next_page;
- }
if (foll_flags & FOLL_WRITE)
fault_flags |= FAULT_FLAG_WRITE;
if (nonblocking)
return ret;
}
-/*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
- address &= PAGE_MASK;
- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
- struct vm_area_struct *prev = vma->vm_prev;
-
- /*
- * Is there a mapping abutting this one below?
- *
- * That's only ok if it's the same stack mapping
- * that has gotten split..
- */
- if (prev && prev->vm_end == address)
- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
- return expand_downwards(vma, address - PAGE_SIZE);
- }
- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
- struct vm_area_struct *next = vma->vm_next;
-
- /* As VM_GROWSDOWN but s/below/above/ */
- if (next && next->vm_start == address + PAGE_SIZE)
- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
- return expand_upwards(vma, address + PAGE_SIZE);
- }
- return 0;
-}
-
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
if (vma->vm_flags & VM_SHARED)
return VM_FAULT_SIGBUS;
- /* Check if we need to add a guard page to the stack */
- if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGSEGV;
-
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE)) {
entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(bm, nmask, nr_bits);
+ if (compat_get_bitmap(bm, nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, bm, alloc_size);
+ if (copy_to_user(nm, bm, alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
compat_ulong_t mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode, compat_ulong_t flags)
{
- long err = 0;
unsigned long __user *nm = NULL;
unsigned long nr_bits, alloc_size;
nodemask_t bm;
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask) {
- err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+ if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+ return -EFAULT;
nm = compat_alloc_user_space(alloc_size);
- err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+ if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+ return -EFAULT;
}
- if (err)
- return -EFAULT;
-
return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *next;
unsigned long min_brk;
down_write(&mm->mmap_sem);
}
/* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+ next = find_vma(mm, oldbrk);
+ if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
/* Ok, looks good - let it rip. */
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long start_addr;
+ struct vm_area_struct *vma, *prev;
+ unsigned long start_addr, vm_start, prev_end;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
if (len > mm->cached_hole_size) {
}
full_search:
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ for (vma = find_vma_prev(mm, addr, &prev); ; prev = vma,
+ vma = vma->vm_next) {
+ if (prev) {
+ prev_end = vm_end_gap(prev);
+ if (addr < prev_end) {
+ addr = prev_end;
+ /* If vma already violates gap, forget it */
+ if (vma && addr > vma->vm_start)
+ addr = vma->vm_start;
+ }
+ }
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr) {
/*
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ vm_start = vma ? vm_start_gap(vma) : TASK_SIZE;
+ if (addr + len <= vm_start) {
/*
* Remember the place where we stopped the search:
*/
mm->free_area_cache = addr + len;
return addr;
}
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = vma->vm_end;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
}
}
#endif
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
+ unsigned long vm_start, prev_end;
unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
/* requested length too big for entire address space */
/* requesting a specific address */
if (addr) {
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
return addr;
}
/* make sure it can fit in the remaining address space */
if (addr >= low_limit + len) {
- vma = find_vma(mm, addr-len);
- if (!vma || addr <= vma->vm_start)
+ vma = find_vma_prev(mm, addr-len, &prev);
+ if ((!vma || addr <= vm_start_gap(vma)) &&
+ (!prev || addr-len >= vm_end_gap(prev)))
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr-len);
}
* else if new region fits below vma->vm_start,
* return with success:
*/
- vma = find_vma(mm, addr);
- if (!vma || addr+len <= vma->vm_start)
+ vma = find_vma_prev(mm, addr, &prev);
+ vm_start = vma ? vm_start_gap(vma) : mm->mmap_base;
+ prev_end = prev ? vm_end_gap(prev) : low_limit;
+
+ if (addr + len <= vm_start && addr >= prev_end)
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
/* remember the largest hole we saw so far */
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
+ if (addr + mm->cached_hole_size < vm_start)
+ mm->cached_hole_size = vm_start - addr;
/* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (vma->vm_start >= low_limit + len);
+ addr = vm_start - len;
+ } while (vm_start >= low_limit + len);
bottomup:
/*
EXPORT_SYMBOL(find_vma);
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
- struct vm_area_struct *vma = NULL, *prev = NULL;
- struct rb_node *rb_node;
- if (!mm)
- goto out;
-
- /* Guard against addr being lower than the first VMA */
- vma = mm->mmap;
-
- /* Go through the RB tree quickly. */
- rb_node = mm->mm_rb.rb_node;
-
- while (rb_node) {
- struct vm_area_struct *vma_tmp;
- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ struct vm_area_struct *vma;
- if (addr < vma_tmp->vm_end) {
- rb_node = rb_node->rb_left;
- } else {
- prev = vma_tmp;
- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
- break;
+ vma = find_vma(mm, addr);
+ if (vma) {
+ *pprev = vma->vm_prev;
+ } else {
+ struct rb_node *rb_node = mm->mm_rb.rb_node;
+ *pprev = NULL;
+ while (rb_node) {
+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
rb_node = rb_node->rb_right;
}
}
-
-out:
- *pprev = prev;
- return prev ? prev->vm_next : vma;
+ return vma;
}
/*
* update accounting. This is shared with both the
* grow-up and grow-down cases.
*/
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+ unsigned long size, unsigned long grow)
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start, actual_size;
+ unsigned long new_start;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- actual_size = size;
- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
- actual_size -= PAGE_SIZE;
- if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
*/
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
- int error;
+ struct vm_area_struct *next;
+ unsigned long gap_addr;
+ int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
+ /* Guard against exceeding limits of the address space. */
+ address &= PAGE_MASK;
+ if (address >= TASK_SIZE)
+ return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ /* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- vma_lock_anon_vma(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
- * Also guard against wrapping around to address 0.
*/
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else {
- vma_unlock_anon_vma(vma);
- return -ENOMEM;
- }
- error = 0;
+ vma_lock_anon_vma(vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
int expand_downwards(struct vm_area_struct *vma,
unsigned long address)
{
+ struct vm_area_struct *prev;
+ unsigned long gap_addr;
int error;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
- if (unlikely(anon_vma_prepare(vma)))
- return -ENOMEM;
-
address &= PAGE_MASK;
error = security_file_mmap(NULL, 0, 0, 0, address, 1);
if (error)
return error;
- vma_lock_anon_vma(vma);
+ /* Enforce stack_guard_gap */
+ gap_addr = address - stack_guard_gap;
+ if (gap_addr > address)
+ return -ENOMEM;
+ prev = vma->vm_prev;
+ if (prev && prev->vm_end > gap_addr) {
+ if (!(prev->vm_flags & VM_GROWSDOWN))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
+
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
+ vma_lock_anon_vma(vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
return error;
}
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+ unsigned long val;
+ char *endptr;
+
+ val = simple_strtoul(p, &endptr, 10);
+ if (!*endptr)
+ stack_guard_gap = val << PAGE_SHIFT;
+
+ return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
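Since cmdline_parse_stack_guard_gap() only commits the value when the whole string parses as a decimal number (*endptr == '\0'), a malformed option silently keeps the 256-page default set just above. An illustrative boot parameter that shrinks the gap back to a single page, i.e. the pre-hardening layout:

	stack_guard_gap=1
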
#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
swab32s(&swap_header->info.version);
swab32s(&swap_header->info.last_page);
swab32s(&swap_header->info.nr_badpages);
+ if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
+ return 0;
for (i = 0; i < swap_header->info.nr_badpages; i++)
swab32s(&swap_header->info.badpages[i]);
}
trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
+ if (rsize < count) {
+ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+ count = rsize;
+ }
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
#include <asm/uaccess.h>
#include "br_private.h"
-/* called with RTNL */
static int get_bridge_ifindices(struct net *net, int *indices, int num)
{
struct net_device *dev;
int i = 0;
- for_each_netdev(net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
if (i >= num)
break;
if (dev->priv_flags & IFF_EBRIDGE)
indices[i++] = dev->ifindex;
}
+ rcu_read_unlock();
return i;
}
* @func: callback function on filter match
* @data: returned parameter for callback function
* @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
*
* Description:
* Invokes the callback function with the received sk_buff and the given
*/
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *), void *data,
- char *ident)
+ char *ident, struct sock *sk)
{
struct receiver *r;
struct hlist_head *rl;
r->func = func;
r->data = data;
r->ident = ident;
+ r->sk = sk;
hlist_add_head_rcu(&r->list, rl);
d->entries++;
static void can_rx_delete_receiver(struct rcu_head *rp)
{
struct receiver *r = container_of(rp, struct receiver, rcu);
+ struct sock *sk = r->sk;
kmem_cache_free(rcv_cache, r);
+ if (sk)
+ sock_put(sk);
}
/**
spin_unlock(&can_rcvlists_lock);
/* schedule the receiver item for deletion */
- if (r)
+ if (r) {
+ if (r->sk)
+ sock_hold(r->sk);
call_rcu(&r->rcu, can_rx_delete_receiver);
+ }
}
EXPORT_SYMBOL(can_rx_unregister);
struct receiver {
struct hlist_node list;
- struct rcu_head rcu;
canid_t can_id;
canid_t mask;
unsigned long matches;
void (*func)(struct sk_buff *, void *);
void *data;
char *ident;
+ struct sock *sk;
+ struct rcu_head rcu;
};
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
static void bcm_remove_op(struct bcm_op *op)
{
- hrtimer_cancel(&op->timer);
- hrtimer_cancel(&op->thrtimer);
-
- if (op->tsklet.func)
- tasklet_kill(&op->tsklet);
+ if (op->tsklet.func) {
+ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+ hrtimer_active(&op->timer)) {
+ hrtimer_cancel(&op->timer);
+ tasklet_kill(&op->tsklet);
+ }
+ }
- if (op->thrtsklet.func)
- tasklet_kill(&op->thrtsklet);
+ if (op->thrtsklet.func) {
+ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+ hrtimer_active(&op->thrtimer)) {
+ hrtimer_cancel(&op->thrtimer);
+ tasklet_kill(&op->thrtsklet);
+ }
+ }
if ((op->frames) && (op->frames != &op->sframe))
kfree(op->frames);
err = can_rx_register(dev, op->can_id,
REGMASK(op->can_id),
bcm_rx_handler, op,
- "bcm");
+ "bcm", sk);
op->rx_reg_dev = dev;
dev_put(dev);
} else
err = can_rx_register(NULL, op->can_id,
REGMASK(op->can_id),
- bcm_rx_handler, op, "bcm");
+ bcm_rx_handler, op, "bcm", sk);
if (err) {
/* this bcm rx op is broken -> remove it */
list_del(&op->list);
struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
struct sock *sk = sock->sk;
struct bcm_sock *bo = bcm_sk(sk);
+ int ret = 0;
if (len < sizeof(*addr))
return -EINVAL;
- if (bo->bound)
- return -EISCONN;
+ lock_sock(sk);
+
+ if (bo->bound) {
+ ret = -EISCONN;
+ goto fail;
+ }
/* bind a device to this socket */
if (addr->can_ifindex) {
struct net_device *dev;
dev = dev_get_by_index(&init_net, addr->can_ifindex);
- if (!dev)
- return -ENODEV;
-
+ if (!dev) {
+ ret = -ENODEV;
+ goto fail;
+ }
if (dev->type != ARPHRD_CAN) {
dev_put(dev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
bo->ifindex = dev->ifindex;
bo->ifindex = 0;
}
- bo->bound = 1;
-
if (proc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%lu", sock_i_ino(sk));
bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
proc_dir,
&bcm_proc_fops, sk);
+ if (!bo->bcm_proc_read) {
+ ret = -ENOMEM;
+ goto fail;
+ }
}
- return 0;
+ bo->bound = 1;
+
+fail:
+ release_sock(sk);
+
+ return ret;
}
static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
{
return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
gwj->ccgw.filter.can_mask, can_can_gw_rcv,
- gwj, "gw");
+ gwj, "gw", NULL);
}
static inline void cgw_unregister_filter(struct cgw_job *gwj)
for (i = 0; i < count; i++) {
err = can_rx_register(dev, filter[i].can_id,
filter[i].can_mask,
- raw_rcv, sk, "raw");
+ raw_rcv, sk, "raw", sk);
if (err) {
/* clean up successfully registered filters */
while (--i >= 0)
if (err_mask)
err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
- raw_rcv, sk, "raw");
+ raw_rcv, sk, "raw", sk);
return err;
}
if (optlen % sizeof(struct can_filter) != 0)
return -EINVAL;
+ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
+ return -EINVAL;
+
count = optlen / sizeof(struct can_filter);
if (count > 1) {
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
+ if (con->auth_reply_buf) {
+ /*
+ * Any connection that defines ->get_authorizer()
+ * should also define ->verify_authorizer_reply().
+ * See get_connect_authorizer().
+ */
+ ret = con->ops->verify_authorizer_reply(con, 0);
+ if (ret < 0) {
+ con->error_msg = "bad authorize reply";
+ return ret;
+ }
+ }
+
switch (con->in_reply.tag) {
case CEPH_MSGR_TAG_FEATURES:
pr_err("%s%lld %s feature set mismatch,"
goto out;
}
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
out_set_summed:
skb->ip_summed = CHECKSUM_NONE;
out:
!PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
NAPI_GRO_CB(skb)->frag0 =
skb_frag_address(&skb_shinfo(skb)->frags[0]);
- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
+ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ skb_frag_size(&skb_shinfo(skb)->frags[0]),
+ skb->end - skb->tail);
}
}
EXPORT_SYMBOL(skb_gro_reset_offset);
struct nlattr *nla;
struct sk_buff *skb;
unsigned long flags;
+ void *msg_header;
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
skb = genlmsg_new(al, GFP_KERNEL);
- if (skb) {
- genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(skb, NLA_UNSPEC,
- sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- } else {
- mod_timer(&data->send_timer, jiffies + HZ / 10);
+ if (!skb)
+ goto err;
+
+ msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ if (!msg_header) {
+ nlmsg_free(skb);
+ skb = NULL;
+ goto err;
+ }
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ if (!nla) {
+ nlmsg_free(skb);
+ skb = NULL;
+ goto err;
}
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ goto out;
+err:
+ mod_timer(&data->send_timer, jiffies + HZ / 10);
+out:
spin_lock_irqsave(&data->lock, flags);
swap(data->skb, skb);
spin_unlock_irqrestore(&data->lock, flags);
+ if (skb) {
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+ struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+ genlmsg_end(skb, genlmsg_data(gnlh));
+ }
+
return skb;
}
}
/**
- * sk_filter - run a packet through a socket filter
+ * sk_filter_trim_cap - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
+ * @cap: limit on how short the eBPF program may trim the packet
*
* Run the filter code and then cut skb->data to correct size returned by
* sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller
* be accepted or -EPERM if the packet should be tossed.
*
*/
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
-
- err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();
return err;
}
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
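Note that the cap only limits how far a filter may shorten an accepted packet; rejection is still governed by a zero return. A worked example with illustrative numbers:

	/* skb->len = 100, caller passes cap = 40:
	 *
	 *   filter returns 0  -> -EPERM, packet dropped (cap irrelevant)
	 *   filter returns 20 -> pskb_trim(skb, max(40, 20)) keeps 40 bytes
	 *   filter returns 60 -> pskb_trim(skb, max(40, 60)) keeps 60 bytes
	 *
	 * The packet is never trimmed below the cap, so protocol code can
	 * protect its headers from an over-aggressive filter.
	 */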
/**
* sk_run_filter - run a filter on a socket
rtnl_msg_handlers[protocol][msgindex].doit = NULL;
rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+ rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
return 0;
}
}
-int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- int err;
int skb_len;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
return -ENOMEM;
}
- err = sk_filter(sk, skb);
- if (err)
- return err;
-
if (!sk_rmem_schedule(sk, skb->truesize)) {
atomic_inc(&sk->sk_drops);
return -ENOBUFS;
sk->sk_data_ready(sk, skb_len);
return 0;
}
+EXPORT_SYMBOL(__sock_queue_rcv_skb);
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+
+ err = sk_filter(sk, skb);
+ if (err)
+ return err;
+
+ return __sock_queue_rcv_skb(sk, skb);
+}
EXPORT_SYMBOL(sock_queue_rcv_skb);
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+ const int nested, unsigned int trim_cap)
{
int rc = NET_RX_SUCCESS;
- if (sk_filter(sk, skb))
+ if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;
skb->dev = NULL;
kfree_skb(skb);
goto out;
}
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
void sk_reset_txq(struct sock *sk)
{
break;
case SO_SNDBUF:
/* Don't error on this, BSD doesn't, and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_wmem_max)
- val = sysctl_wmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
- if ((val * 2) < SOCK_MIN_SNDBUF)
- sk->sk_sndbuf = SOCK_MIN_SNDBUF;
- else
- sk->sk_sndbuf = val * 2;
-
- /*
- * Wake up sending tasks if we
- * upped the value.
- */
+ sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ /* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
case SO_RCVBUF:
/* Don't error on this, BSD doesn't, and if you think
- about it this is right. Otherwise apps have to
- play 'guess the biggest size' games. RCVBUF/SNDBUF
- are treated in BSD as hints */
-
- if (val > sysctl_rmem_max)
- val = sysctl_rmem_max;
+ * about it this is right. Otherwise apps have to
+ * play 'guess the biggest size' games. RCVBUF/SNDBUF
+ * are treated in BSD as hints
+ */
+ val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
- if ((val * 2) < SOCK_MIN_RCVBUF)
- sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
- else
- sk->sk_rcvbuf = val * 2;
+ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
break;
case SO_PASSCRED:
- v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
break;
case SO_PEERCRED:
break;
case SO_PASSSEC:
- v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+ v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_PEERSEC:
}
newsk->sk_err = 0;
+ newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
/*
* Before updating sk_refcnt, we must commit prior changes to memory
}
EXPORT_SYMBOL(sock_rfree);
+void sock_efree(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_efree);
int sock_i_uid(struct sock *sk)
{
/**
* __sk_mem_reclaim - reclaim memory_allocated
* @sk: socket
+ * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
*/
-void __sk_mem_reclaim(struct sock *sk)
+void __sk_mem_reclaim(struct sock *sk, int amount)
{
struct proto *prot = sk->sk_prot;
- atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
- prot->memory_allocated);
- sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+ amount >>= SK_MEM_QUANTUM_SHIFT;
+ atomic_long_sub(amount, prot->memory_allocated);
+ sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
if (prot->memory_pressure && *prot->memory_pressure &&
(atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
skb) < 0)
return 1;
- goto discard;
+ consume_skb(skb);
+ return 0;
}
if (dh->dccph_type == DCCP_PKT_RESET)
goto discard;
{
const struct iphdr *iph = (struct iphdr *)skb->data;
const u8 offset = iph->ihl << 2;
- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+ const struct dccp_hdr *dh;
struct dccp_sock *dp;
struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
int err;
struct net *net = dev_net(skb->dev);
- if (skb->len < offset + sizeof(*dh) ||
- skb->len < offset + __dccp_basic_hdr_len(dh)) {
- ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
- return;
- }
+ /* Only need dccph_dport & dccph_sport which are the first
+ * 4 bytes in dccp header.
+ * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+ */
+ BUILD_BUG_ON(offsetof(struct dccp_hdr, dccph_sport) + sizeof(dh->dccph_sport) > 8);
+ BUILD_BUG_ON(offsetof(struct dccp_hdr, dccph_dport) + sizeof(dh->dccph_dport) > 8);
+ dh = (struct dccp_hdr *)(skb->data + offset);
sk = inet_lookup(net, &dccp_hashinfo,
iph->daddr, dh->dccph_dport,
{
const struct dccp_hdr *dh;
unsigned int cscov;
+ u8 dccph_doff;
if (skb->pkt_type != PACKET_HOST)
return 1;
/*
* If P.Data Offset is too small for packet type, drop packet and return
*/
- if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
- DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
+ dccph_doff = dh->dccph_doff;
+ if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+ DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
return 1;
}
/*
* If P.Data Offset is too large for packet, drop packet and return
*/
- if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
- DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
+ if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
+ DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
return 1;
}
-
+ dh = dccp_hdr(skb);
/*
* If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
* has short sequence numbers), drop packet and return
goto discard_and_relse;
nf_reset(skb);
- return sk_receive_skb(sk, skb, 1);
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
}
+static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&dccp_hashinfo, &dccp_death_row, AF_INET);
+}
+
static struct pernet_operations dccp_v4_ops = {
.init = dccp_v4_init_net,
.exit = dccp_v4_exit_net,
+ .exit_batch = dccp_v4_exit_batch,
};
static int __init dccp_v4_init(void)
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
- const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
+ const struct dccp_hdr *dh;
struct dccp_sock *dp;
struct ipv6_pinfo *np;
struct sock *sk;
__u64 seq;
struct net *net = dev_net(skb->dev);
- if (skb->len < offset + sizeof(*dh) ||
- skb->len < offset + __dccp_basic_hdr_len(dh)) {
- ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
- ICMP6_MIB_INERRORS);
- return;
- }
+ /* Only need dccph_dport & dccph_sport which are the first
+ * 4 bytes in dccp header.
+ * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+ */
+ BUILD_BUG_ON(offsetof(struct dccp_hdr, dccph_sport) + sizeof(dh->dccph_sport) > 8);
+ BUILD_BUG_ON(offsetof(struct dccp_hdr, dccph_dport) + sizeof(dh->dccph_dport) > 8);
+ dh = (struct dccp_hdr *)(skb->data + offset);
sk = inet6_lookup(net, &dccp_hashinfo,
&hdr->daddr, dh->dccph_dport,
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
- return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+ return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
no_dccp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
+ .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}
+static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&dccp_hashinfo, &dccp_death_row, AF_INET6);
+}
+
static struct pernet_operations dccp_v6_ops = {
.init = dccp_v6_init_net,
.exit = dccp_v6_exit_net,
+ .exit_batch = dccp_v6_exit_batch,
};
static int __init dccp_v6_init(void)
__kfree_skb(skb);
}
+ /* If socket has been already reset kill it. */
+ if (sk->sk_state == DCCP_CLOSED)
+ goto adjudge_to_death;
+
if (data_was_unread) {
/* Unread data was tossed, send an appropriate Reset Code */
DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
if (!fld.daddr) {
fld.daddr = fld.saddr;
- err = -EADDRNOTAVAIL;
if (dev_out)
dev_put(dev_out);
+ err = -EINVAL;
dev_out = init_net.loopback_dev;
+ if (!dev_out->dn_ptr)
+ goto out;
+ err = -EADDRNOTAVAIL;
dev_hold(dev_out);
if (!fld.daddr) {
fld.daddr =
if (dev_out == NULL)
goto out;
dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+ if (!dn_db)
+ goto e_inval;
/* Possible improvement - check all devices for local addr */
if (dn_dev_islocal(dev_out, fld.daddr)) {
dev_put(dev_out);
dev_put(dev_out);
dev_out = init_net.loopback_dev;
dev_hold(dev_out);
+ if (!dev_out->dn_ptr)
+ goto e_inval;
fld.flowidn_oif = dev_out->ifindex;
if (res.fi)
dn_fib_info_put(res.fi);
goto validate_return_locked;
}
+ if (opt_iter + 1 == opt_len) {
+ err_offset = opt_iter;
+ goto validate_return_locked;
+ }
tag_len = tag[1];
if (tag_len > (opt_len - opt_iter)) {
err_offset = opt_iter + 1;
time_before(jiffies, (in_dev)->mr_v2_seen)))
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
static void igmp_gq_start_timer(struct in_device *in_dev)
{
int tv = net_random() % in_dev->mr_maxdelay;
+ unsigned long exp = jiffies + tv + 2;
+
+ if (in_dev->mr_gq_running &&
+ time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+ return;
in_dev->mr_gq_running = 1;
- if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+ if (!mod_timer(&in_dev->mr_gq_timer, exp))
in_dev_hold(in_dev);
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
}
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
+/*
+ * restore ip_mc_list deleted records
+ */
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
struct ip_mc_list *pmc, *pmc_prev;
- struct ip_sf_list *psf, *psf_next;
+ struct ip_sf_list *psf;
+ __be32 multiaddr = im->multiaddr;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc_prev = NULL;
in_dev->mc_tomb = pmc->next;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
+
+ spin_lock_bh(&im->lock);
if (pmc) {
- for (psf=pmc->tomb; psf; psf=psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->interface = pmc->interface;
+ im->crcount = in_dev->mr_qrv ?: IGMP_Unsolicited_Report_Count;
+ im->sfmode = pmc->sfmode;
+ if (pmc->sfmode == MCAST_INCLUDE) {
+ im->tomb = pmc->tomb;
+ im->sources = pmc->sources;
+ for (psf = im->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
kfree(pmc);
}
+ spin_unlock_bh(&im->lock);
}
+/*
+ * flush ip_mc_list deleted records
+ */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *nextpmc;
rcu_assign_pointer(in_dev->mc_list, im);
#ifdef CONFIG_IP_MULTICAST
- igmpv3_del_delrec(in_dev, im->multiaddr);
+ igmpv3_del_delrec(in_dev, im);
#endif
igmp_group_added(im);
if (!in_dev->dead)
ASSERT_RTNL();
- for_each_pmc_rtnl(in_dev, pmc)
+ for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, pmc);
+#endif
igmp_group_added(pmc);
+ }
}
/* Device going down */
in_dev->mr_gq_running = 0;
if (del_timer(&in_dev->mr_gq_timer))
__in_dev_put(in_dev);
- igmpv3_clear_delrec(in_dev);
#endif
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
- for_each_pmc_rtnl(in_dev, pmc)
+ for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, pmc);
+#endif
igmp_group_added(pmc);
+ }
}
/*
/* Deactivate timers */
ip_mc_down(in_dev);
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_clear_delrec(in_dev);
+#endif
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
-
- /* We've dropped the groups in ip_mc_down already */
- ip_mc_clear_src(i);
ip_ma_put(i);
}
}
inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
newsk->sk_write_space = sk_stream_write_space;
+ inet_sk(newsk)->mc_list = NULL;
+
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newicsk->icsk_probes_out = 0;
cork->length += length;
if (((length > mtu) || (skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
*/
int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO))
+ if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) &&
+ !IPCB(skb)->opt.optlen)
skb_dst_drop(skb);
return sock_queue_rcv_skb(sk, skb);
}
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c)
+ if (c) {
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXVIFS;
+ }
return c;
}
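
Seeding last_assert one full threshold in the past makes the very first assert eligible immediately; the check it unblocks (in ipmr's forwarding path, paraphrased) then evaluates as:

	/* with last_assert = jiffies - MFC_ASSERT_THRESH - 1:
	 *
	 *   time_after(jiffies, last_assert + MFC_ASSERT_THRESH)
	 * = time_after(jiffies, jiffies - 1)
	 * = true
	 *
	 * whereas a zeroed last_assert could wrongly suppress the first
	 * asserts after boot, since jiffies does not start at zero.
	 */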
.timeout = 180,
};
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
- .me = THIS_MODULE,
- .help = help,
- .expect_policy = &snmp_exp_policy,
- .name = "snmp",
- .tuple.src.l3num = AF_INET,
- .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
- .tuple.dst.protonum = IPPROTO_UDP,
-};
-
static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
.me = THIS_MODULE,
.help = help,
static int __init nf_nat_snmp_basic_init(void)
{
- int ret = 0;
-
BUG_ON(nf_nat_snmp_hook != NULL);
RCU_INIT_POINTER(nf_nat_snmp_hook, help);
- ret = nf_conntrack_helper_register(&snmp_trap_helper);
- if (ret < 0) {
- nf_conntrack_helper_unregister(&snmp_helper);
- return ret;
- }
- return ret;
+ return nf_conntrack_helper_register(&snmp_trap_helper);
}
static void __exit nf_nat_snmp_basic_fini(void)
static void ping_v4_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
+
pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+ write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&ping_table.lock);
}
+ write_unlock_bh(&ping_table.lock);
}
static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
{
struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
+ if (!skb)
+ return 0;
pfh->wcheck = csum_partial((char *)&pfh->icmph,
sizeof(struct icmphdr), pfh->wcheck);
pfh->icmph.checksum = csum_fold(pfh->wcheck);
if (len > 0xFFFF)
return -EMSGSIZE;
+ /* Must have at least a full ICMP header. */
+ if (len < sizeof(struct icmphdr))
+ return -EINVAL;
+
/*
* Check the flags.
*/
*/
if (fi && res->prefixlen < 4)
fi = NULL;
+ } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+ (orig_oif != dev_out->ifindex)) {
+ /* For local routes that require a particular output interface
+ * we do not want to cache the result. Caching the result
+ * causes incorrect behaviour when there are multiple source
+ * addresses on the interface, the end result being that if the
+ * intended recipient is waiting on that interface for the
+ * packet, it will not receive it because it will be delivered on
+ * the loopback interface and the IP_PKTINFO ipi_ifindex will
+ * be set to the loopback interface as well.
+ */
+ fi = NULL;
}
rth = rt_dst_alloc(dev_out,
ret = -EAGAIN;
break;
}
+ /* if __tcp_splice_read() got nothing while we have
+ * an skb in receive queue, we do not want to loop.
+ * This might happen with URG data.
+ */
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ break;
sk_wait_data(sk, &timeo);
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
+int tcp_filter(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcphdr *th = (struct tcphdr *)skb->data;
+ unsigned int eaten = skb->len;
+ int err;
+
+ err = sk_filter_trim_cap(sk, skb, th->doff * 4);
+ if (!err) {
+ eaten -= skb->len;
+ TCP_SKB_CB(skb)->end_seq -= eaten;
+ }
+ return err;
+}
+EXPORT_SYMBOL(tcp_filter);
+
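tcp_filter() reuses sk_filter_trim_cap() with the TCP header length as the floor and then repairs the sequence-space bookkeeping for whatever payload the filter sliced off. Tracing it with illustrative numbers:

	/* skb->len = 100, th->doff = 10 (40-byte header), filter returns 20:
	 *
	 *   cap   = th->doff * 4  = 40
	 *   kept  = max(cap, 20)  = 40   (headers can never be cut away)
	 *   eaten = 100 - 40      = 60   bytes of payload trimmed
	 *   TCP_SKB_CB(skb)->end_seq -= 60, keeping seq accounting
	 *                                   consistent with the new length
	 */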
/*
* From tcp_input.c
*/
goto discard_and_relse;
nf_reset(skb);
- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard_and_relse;
+ th = (const struct tcphdr *)skb->data;
+ iph = ip_hdr(skb);
skb->dev = NULL;
len = 0;
tcp_for_write_queue_from_safe(skb, next, sk) {
copy = min_t(int, skb->len, probe_size - len);
- if (nskb->ip_summed)
+ if (nskb->ip_summed) {
skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
- else
- nskb->csum = skb_copy_and_csum_bits(skb, 0,
- skb_put(nskb, copy),
- copy, nskb->csum);
+ } else {
+ __wsum csum = skb_copy_and_csum_bits(skb, 0,
+ skb_put(nskb, copy),
+ copy, 0);
+ nskb->csum = csum_block_add(nskb->csum, csum, len);
+ }
if (skb->len <= copy) {
/* We've eaten all the data from this skb.
int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
int window;
- if (mss > full_space)
+ if (unlikely(mss > full_space)) {
mss = full_space;
-
+ if (mss <= 0)
+ return 0;
+ }
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;
* copying overhead: fragmentation, tunneling, mangling etc.
*/
if (atomic_read(&sk->sk_wmem_alloc) >
- min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+ min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+ sk->sk_sndbuf))
return -EAGAIN;
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
dev_disable_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
const struct inet6_protocol *ops;
int proto;
struct frag_hdr *fptr;
- unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
ipv6h->payload_len = htons(skb->len - skb->mac_len -
sizeof(*ipv6h));
if (proto == IPPROTO_UDP) {
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ int err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
fptr = (struct frag_hdr *)(skb_network_header(skb) +
- unfrag_ip6hlen);
+ err);
fptr->frag_off = htons(offset);
if (skb->next != NULL)
fptr->frag_off |= htons(IP6_MF);
ops = rcu_dereference(inet6_protos[proto]);
if (!ops || !ops->gro_receive) {
__pskb_pull(skb, skb_gro_offset(skb));
+ skb_gro_frag0_invalidate(skb);
proto = ipv6_gso_pull_exthdrs(skb, proto);
skb_gro_pull(skb, -skb_transport_offset(skb));
skb_reset_transport_header(skb);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
unsigned int packet_len = skb->tail - skb->network_header;
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
- while (offset + 1 <= packet_len) {
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
switch (**nexthdr) {
return offset;
}
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
+ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+ return -EINVAL;
+
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
+ offset += ipv6_optlen(exthdr);
+ *nexthdr = &exthdr->nexthdr;
}
- return offset;
+ return -EINVAL;
}
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
u8 *prevhdr, nexthdr = 0;
struct net *net = dev_net(skb_dst(skb)->dev);
- hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ goto fail;
+ hlen = err;
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
if (((length > mtu) ||
(skb && skb_has_frags(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
(sk->sk_type == SOCK_DGRAM)) {
err = ip6_ufo_append_data(sk, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
*/
alloclen += sizeof(struct frag_hdr);
+ copy = datalen - transhdrlen - fraggap;
+ if (copy < 0) {
+ err = -EINVAL;
+ goto error;
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
- copy = datalen - transhdrlen - fraggap;
-
- if (copy < 0) {
- err = -EINVAL;
- kfree_skb(skb);
- goto error;
- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ getfrag(from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
static __u16
parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
- __u8 nexthdr = ipv6h->nexthdr;
- __u16 off = sizeof (*ipv6h);
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+ u8 next, nexthdr = ipv6h->nexthdr;
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
- __u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
- if (raw + off + sizeof (*hdr) > skb->data &&
- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+ u16 optlen;
+
+ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
break;
- hdr = (struct ipv6_opt_hdr *) (raw + off);
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
if (frag_hdr->frag_off)
} else {
optlen = ipv6_optlen(hdr);
}
+ /* cache hdr->nexthdr, since pskb_may_pull() might
+ * invalidate hdr
+ */
+ next = hdr->nexthdr;
if (nexthdr == NEXTHDR_DEST) {
- __u16 i = off + 2;
+ u16 i = 2;
+
+ /* Remember: hdr is no longer valid at this point. */
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
/* No more room for encapsulation limit */
- if (i + sizeof (*tel) > off + optlen)
+ if (i + sizeof(*tel) > optlen)
break;
- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+ tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
/* return index of option if found and valid */
if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
tel->length == 1)
- return i;
+ return i + off - nhoff;
/* else jump to next option */
if (tel->type)
i += tel->length + 2;
i++;
}
}
- nexthdr = hdr->nexthdr;
+ nexthdr = next;
off += optlen;
}
return 0;
struct ipv6_tel_txoption opt;
struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
+ bool use_cache = false;
int mtu;
unsigned int max_headroom = sizeof(struct ipv6hdr);
u8 proto;
int err = -1;
int pkt_len;
- if (!fl6->flowi6_mark)
+ if (!(t->parms.flags &
+ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+ /* enable the cache only if the routing decision does
+ * not depend on the current inner header value
+ */
+ use_cache = true;
+ }
+
+ if (use_cache)
dst = ip6_tnl_dst_check(t);
if (!dst) {
ndst = ip6_route_output(net, NULL, fl6);
skb = new_skb;
}
skb_dst_drop(skb);
- if (fl6->flowi6_mark) {
+ if (!use_cache) {
skb_dst_set(skb, dst);
ndst = NULL;
} else {
ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
nf_reset(skb);
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
pkt_len = skb->len;
err = ip6_local_out(skb);
struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c == NULL)
return NULL;
+ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->mfc_un.res.minvif = MAXMIFS;
return c;
}
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
dev_mc_del(dev, buf);
}
- if (mc->mca_flags & MAF_NOREPORT)
- goto done;
spin_unlock_bh(&mc->mca_lock);
+ if (mc->mca_flags & MAF_NOREPORT)
+ return;
if (!mc->idev->dead)
igmp6_leave_group(mc);
spin_lock_bh(&mc->mca_lock);
if (del_timer(&mc->mca_timer))
atomic_dec(&mc->mca_refcnt);
-done:
- ip6_mc_clear_src(mc);
spin_unlock_bh(&mc->mca_lock);
}
spin_unlock_bh(&idev->mc_lock);
}
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc, *pmc_prev;
- struct ip6_sf_list *psf, *psf_next;
+ struct ip6_sf_list *psf;
+ struct in6_addr *pmca = &im->mca_addr;
spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
}
spin_unlock_bh(&idev->mc_lock);
+ spin_lock_bh(&im->mca_lock);
if (pmc) {
- for (psf=pmc->mca_tomb; psf; psf=psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+ im->mca_sfmode = pmc->mca_sfmode;
+ if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ im->mca_tomb = pmc->mca_tomb;
+ im->mca_sources = pmc->mca_sources;
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
kfree(pmc);
}
+ spin_unlock_bh(&im->mca_lock);
}
static void mld_clear_delrec(struct inet6_dev *idev)
idev->mc_list = mc;
write_unlock_bh(&idev->lock);
- mld_del_delrec(idev, &mc->mca_addr);
+ mld_del_delrec(idev, mc);
igmp6_group_added(mc);
ma_put(mc);
return 0;
write_unlock_bh(&idev->lock);
igmp6_group_dropped(ma);
+ ip6_mc_clear_src(ma);
ma_put(ma);
return 0;
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
+
+ for (i = idev->mc_list; i; i=i->next)
+ igmp6_group_dropped(i);
+
+ /* Should stop the timer after the group drop, or we will
+ * start the timer again in mld_ifc_event()
+ */
idev->mc_ifc_count = 0;
if (del_timer(&idev->mc_ifc_timer))
__in6_dev_put(idev);
idev->mc_gq_running = 0;
if (del_timer(&idev->mc_gq_timer))
__in6_dev_put(idev);
-
- for (i = idev->mc_list; i; i=i->next)
- igmp6_group_dropped(i);
read_unlock_bh(&idev->lock);
-
- mld_clear_delrec(idev);
}
/* Install multicast list, except for all-nodes (already installed) */
read_lock_bh(&idev->lock);
- for (i = idev->mc_list; i; i=i->next)
+ for (i = idev->mc_list; i; i = i->next) {
+ mld_del_delrec(idev, i);
igmp6_group_added(i);
+ }
read_unlock_bh(&idev->lock);
}
/* Deactivate timers */
ipv6_mc_down(idev);
+ mld_clear_delrec(idev);
/* Delete all-nodes address. */
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->next;
- write_unlock_bh(&idev->lock);
- igmp6_group_dropped(i);
+ write_unlock_bh(&idev->lock);
ma_put(i);
-
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);
}
offset += skb_transport_offset(skb);
- if (skb_copy_bits(skb, offset, &csum, 2))
- BUG();
+ err = skb_copy_bits(skb, offset, &csum, 2);
+ if (err < 0) {
+ ip6_flush_pending_frames(sk);
+ goto out;
+ }
/* in case cksum was not initialized */
if (unlikely(csum))
rtm->rtm_type = RTN_UNREACHABLE;
else if (rt->rt6i_flags&RTF_LOCAL)
rtm->rtm_type = RTN_LOCAL;
+ else if (rt->rt6i_flags & RTF_ANYCAST)
+ rtm->rtm_type = RTN_ANYCAST;
else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
goto discard;
#endif
- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard;
/*
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
- if (sk_filter(sk, skb))
+ if (tcp_filter(sk, skb))
goto discard_and_relse;
+ th = (const struct tcphdr *)skb->data;
+ hdr = ipv6_hdr(skb);
skb->dev = NULL;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
int offset;
__wsum csum;
+ int err;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
sipx->sipx_network = ipxif->if_netnum;
memcpy(sipx->sipx_node, ipxif->if_node,
sizeof(sipx->sipx_node));
- rc = -EFAULT;
+ rc = 0;
if (copy_to_user(arg, &ifr, sizeof(ifr)))
- break;
+ rc = -EFAULT;
ipxitf_put(ipxif);
- rc = 0;
break;
}
case SIOCAIPXITFCRT:
* for deallocating this structure if it's complex. If not the user can
* just supply kfree, which should take care of the job.
*/
-#ifdef CONFIG_LOCKDEP
-static int hashbin_lock_depth = 0;
-#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
irda_queue_t* queue;
IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
/* Synchronize */
- if ( hashbin->hb_type & HB_LOCK ) {
- spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
- hashbin_lock_depth++);
- }
+ if (hashbin->hb_type & HB_LOCK)
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
/*
* Free the entries in the hashbin, TODO: use hashbin_clear when
* it has been shown to work
*/
for (i = 0; i < HASHBIN_SIZE; i ++ ) {
- queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
- while (queue ) {
- if (free_func)
- (*free_func)(queue);
- queue = dequeue_first(
- (irda_queue_t**) &hashbin->hb_queue[i]);
+ while (1) {
+ queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+
+ if (!queue)
+ break;
+
+ if (free_func) {
+ if (hashbin->hb_type & HB_LOCK)
+ spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+ free_func(queue);
+ if (hashbin->hb_type & HB_LOCK)
+ spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ }
}
}
hashbin->magic = ~HB_MAGIC;
/* Release lock */
- if ( hashbin->hb_type & HB_LOCK) {
+ if (hashbin->hb_type & HB_LOCK)
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
-#ifdef CONFIG_LOCKDEP
- hashbin_lock_depth--;
-#endif
- }
/*
* Free the hashbin structure
} u;
struct sk_buff *skb;
} dump;
+ struct mutex dump_lock;
};
static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
+ struct pfkey_sock *pfk;
int err;
if (!capable(CAP_NET_ADMIN))
if (sk == NULL)
goto out;
+ pfk = pfkey_sk(sk);
+ mutex_init(&pfk->dump_lock);
+
sock->ops = &pfkey_ops;
sock_init_data(sock, sk);
struct sadb_msg *hdr;
int rc;
+ mutex_lock(&pfk->dump_lock);
+ if (!pfk->dump.dump) {
+ rc = 0;
+ goto out;
+ }
+
rc = pfk->dump.dump(pfk);
- if (rc == -ENOBUFS)
- return 0;
+ if (rc == -ENOBUFS) {
+ rc = 0;
+ goto out;
+ }
if (pfk->dump.skb) {
- if (!pfkey_can_dump(&pfk->sk))
- return 0;
+ if (!pfkey_can_dump(&pfk->sk)) {
+ rc = 0;
+ goto out;
+ }
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
}
pfkey_terminate_dump(pfk);
+
+out:
+ mutex_unlock(&pfk->dump_lock);
return rc;
}
u8 proto;
struct pfkey_sock *pfk = pfkey_sk(sk);
- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
- if (proto == 0)
+ if (proto == 0) {
+ mutex_unlock(&pfk->dump_lock);
return -EINVAL;
+ }
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_pid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
xfrm_state_walk_init(&pfk->dump.u.state, proto);
+ mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
{
struct pfkey_sock *pfk = pfkey_sk(sk);
- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_pid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sp;
pfk->dump.done = pfkey_dump_sp_done;
xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+ mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
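The dump_lock introduced in these af_key hunks serializes the whole dump life cycle: registration (pfkey_dump_sa/pfkey_dump_sp), execution (pfkey_do_dump) and teardown all run under the same per-socket mutex, and a second registration fails with -EBUSY while a dump is claimed. The claim step, restated as a small sketch with illustrative names:

#include <linux/mutex.h>

struct dump_state {
        struct mutex lock;
        int (*fn)(void *ctx);   /* non-NULL while a dump is in progress */
};

static int claim_dump(struct dump_state *st, int (*fn)(void *ctx))
{
        mutex_lock(&st->lock);
        if (st->fn) {                   /* another dump already claimed */
                mutex_unlock(&st->lock);
                return -EBUSY;
        }
        st->fn = fn;    /* cleared under the same lock at teardown */
        mutex_unlock(&st->lock);
        return 0;
}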
}
EXPORT_SYMBOL_GPL(l2tp_session_find);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref)
+{
+ struct hlist_head *session_list;
+ struct l2tp_session *session;
+ struct hlist_node *walk;
+
+ if (!tunnel) {
+ struct l2tp_net *pn = l2tp_pernet(net);
+
+ session_list = l2tp_session_id_hash_2(pn, session_id);
+
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ rcu_read_unlock_bh();
+
+ return session;
+ }
+ }
+ rcu_read_unlock_bh();
+
+ return NULL;
+ }
+
+ session_list = l2tp_session_id_hash(tunnel, session_id);
+ read_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session, walk, session_list, hlist) {
+ if (session->session_id == session_id) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return session;
+ }
+ }
+ read_unlock_bh(&tunnel->hlist_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
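Every successful l2tp_session_get(..., true) must be balanced by the caller; the pattern, as the pppol2tp ioctl hunk later in this series uses it (use_session() is a stand-in for the caller's real work):

        session = l2tp_session_get(net, tunnel, session_id, true);
        if (session) {
                use_session(session);                   /* hypothetical work */
                if (session->deref)
                        session->deref(session);        /* balances do_ref == true */
                l2tp_session_dec_refcount(session);     /* balances the refcount */
        }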
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref)
{
int hash;
struct hlist_node *walk;
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
read_unlock_bh(&tunnel->hlist_lock);
return session;
}
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
*/
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref)
{
struct l2tp_net *pn = l2tp_pernet(net);
int hash;
for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
if (!strcmp(session->ifname, ifname)) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
rcu_read_unlock_bh();
+
return session;
}
}
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session)
+{
+ struct l2tp_session *session_walk;
+ struct hlist_head *g_head;
+ struct hlist_head *head;
+ struct l2tp_net *pn;
+ struct hlist_node *walk;
+
+ head = l2tp_session_id_hash(tunnel, session->session_id);
+
+ write_lock_bh(&tunnel->hlist_lock);
+ hlist_for_each_entry(session_walk, walk, head, hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist;
+
+ if (tunnel->version == L2TP_HDR_VER_3) {
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+ session->session_id);
+
+ spin_lock_bh(&pn->l2tp_session_hlist_lock);
+ hlist_for_each_entry(session_walk, walk, g_head, global_hlist)
+ if (session_walk->session_id == session->session_id)
+ goto exist_glob;
+
+ hlist_add_head_rcu(&session->global_hlist, g_head);
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ }
+
+ hlist_add_head(&session->hlist, head);
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return 0;
+
+exist_glob:
+ spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+ write_unlock_bh(&tunnel->hlist_lock);
+
+ return -EEXIST;
+}
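l2tp_session_add_to_tunnel() closes the race the old find-then-create callers had: the duplicate check and the insertion now happen under one lock, so two concurrent creators cannot both add the same session ID. A generic, self-contained sketch of that insert-if-absent shape (struct item and add_unique are illustrative; the iterator is the three-cursor 3.2-era hlist form):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
        u32 id;
        struct hlist_node node;
};

static int add_unique(struct hlist_head *head, struct item *it,
                      spinlock_t *lock)
{
        struct item *walk;
        struct hlist_node *pos;

        spin_lock(lock);
        hlist_for_each_entry(walk, pos, head, node)
                if (walk->id == it->id) {
                        spin_unlock(lock);
                        return -EEXIST; /* already present */
                }
        hlist_add_head(&it->node, head);
        spin_unlock(lock);
        return 0;
}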
/* Lookup a tunnel by id
*/
* a data (not control) frame before coming here. Fields up to the
* session-id have already been parsed and ptr points to the data
* after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
*/
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
unsigned char *ptr, unsigned char *optr, u16 hdrflags,
int offset;
u32 ns, nr;
- /* The ref count is increased since we now hold a pointer to
- * the session. Take care to decrement the refcnt when exiting
- * this function from now on...
- */
- l2tp_session_inc_refcount(session);
- if (session->ref)
- (*session->ref)(session);
-
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
/* Try to dequeue as many skbs from reorder_q as we can. */
l2tp_recv_dequeue(session);
- l2tp_session_dec_refcount(session);
-
return;
discard:
if (session->deref)
(*session->deref)(session);
-
- l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);
}
/* Find the session context */
- session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+ session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
if (!session || !session->recv_skb) {
+ if (session) {
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ }
+
/* Not found? Pass to userspace to deal with */
PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
"%s: no session found (%u/%u). Passing up.\n",
}
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct l2tp_session *session;
+ int err;
session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
if (session != NULL) {
l2tp_session_set_header_len(session, tunnel->version);
+ err = l2tp_session_add_to_tunnel(tunnel, session);
+ if (err) {
+ kfree(session);
+
+ return ERR_PTR(err);
+ }
+
/* Bump the reference count. The session context is deleted
* only when this drops to zero.
*/
/* Ensure tunnel socket isn't deleted */
sock_hold(tunnel->sock);
- /* Add session to the tunnel's hash list */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_add_head(&session->hlist,
- l2tp_session_id_hash(tunnel, session_id));
- write_unlock_bh(&tunnel->hlist_lock);
-
- /* And to the global session list if L2TPv3 */
- if (tunnel->version != L2TP_HDR_VER_2) {
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
- spin_lock_bh(&pn->l2tp_session_hlist_lock);
- hlist_add_head_rcu(&session->global_hlist,
- l2tp_session_id_hash_2(pn, session_id));
- spin_unlock_bh(&pn->l2tp_session_hlist_lock);
- }
-
/* Ignore management session in session count value */
if (session->session_id != 0)
atomic_inc(&l2tp_session_count);
+
+ return session;
}
- return session;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);
return tunnel;
}
+struct l2tp_session *l2tp_session_get(struct net *net,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, bool do_ref);
extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+ bool do_ref);
extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
/* Session reference counts. Incremented when code obtains a reference
* to a session.
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
}
/* Show the tunnel or session context */
- if (pd->session == NULL)
+ if (!pd->session) {
l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
l2tp_dfs_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
goto out;
}
- session = l2tp_session_find(net, tunnel, session_id);
- if (session) {
- rc = -EEXIST;
- goto out;
- }
-
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
- if (!session) {
- rc = -ENOMEM;
+ if (IS_ERR(session)) {
+ rc = PTR_ERR(session);
goto out;
}
* 2 of the License, or (at your option) any later version.
*/
+#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
}
/* Ok, this is a data packet. Lookup the session. */
- session = l2tp_session_find(&init_net, NULL, session_id);
- if (session == NULL)
+ session = l2tp_session_get(&init_net, NULL, session_id, true);
+ if (!session)
goto discard;
tunnel = session->tunnel;
- if (tunnel == NULL)
- goto discard;
+ if (!tunnel)
+ goto discard_sess;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
- goto discard;
+ goto discard_sess;
/* Point to L2TP header */
optr = ptr = skb->data;
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+ l2tp_session_dec_refcount(session);
return 0;
return sk_receive_skb(sk, skb, 1);
+discard_sess:
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ goto discard;
+
discard_put:
sock_put(sk);
int ret;
int chk_addr_ret;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
if (addr_len < sizeof(struct sockaddr_l2tpip))
return -EINVAL;
if (addr->l2tp_family != AF_INET)
read_unlock_bh(&l2tp_ip_lock);
lock_sock(sk);
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
return copied;
}
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ struct sk_buff *skb;
+ int amount;
+
+ switch (cmd) {
+ case SIOCOUTQ:
+ amount = sk_wmem_alloc_get(sk);
+ break;
+ case SIOCINQ:
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ amount = skb ? skb->len : 0;
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return put_user(amount, (int __user *)arg);
+}
+EXPORT_SYMBOL(l2tp_ioctl);
+
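From userspace the new handler behaves like a trimmed-down udp_ioctl(): SIOCINQ reports the length of the first queued packet and SIOCOUTQ the unsent byte count. A minimal caller sketch (assumes fd is an L2TP/IP socket):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void show_queues(int fd)
{
        int inq = 0, outq = 0;

        if (ioctl(fd, SIOCINQ, &inq) == 0 &&
            ioctl(fd, SIOCOUTQ, &outq) == 0)
                printf("inq=%d outq=%d\n", inq, outq);
}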
static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.bind = l2tp_ip_bind,
.connect = l2tp_ip_connect,
.disconnect = l2tp_ip_disconnect,
- .ioctl = udp_ioctl,
+ .ioctl = l2tp_ioctl,
.destroy = l2tp_ip_destroy_sock,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+ bool do_ref)
{
u32 tunnel_id;
u32 session_id;
if (info->attrs[L2TP_ATTR_IFNAME]) {
ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
- session = l2tp_session_find_by_ifname(net, ifname);
+ session = l2tp_session_get_by_ifname(net, ifname, do_ref);
} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
(info->attrs[L2TP_ATTR_CONN_ID])) {
tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (tunnel)
- session = l2tp_session_find(net, tunnel, session_id);
+ session = l2tp_session_get(net, tunnel, session_id,
+ do_ref);
}
return session;
struct l2tp_session *session;
u16 pw_type;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, true);
if (session == NULL) {
ret = -ENODEV;
goto out;
if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
int ret = 0;
struct l2tp_session *session;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
goto out;
if (info->attrs[L2TP_ATTR_MRU])
session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
+ l2tp_session_dec_refcount(session);
+
out:
return ret;
}
struct sk_buff *msg;
int ret;
- session = l2tp_nl_session_find(info);
+ session = l2tp_nl_session_get(info, false);
if (session == NULL) {
ret = -ENODEV;
- goto out;
+ goto err;
}
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto out;
+ goto err_ref;
}
ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
0, session);
if (ret < 0)
- goto err_out;
+ goto err_ref_msg;
- return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+ ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
-err_out:
- nlmsg_free(msg);
+ l2tp_session_dec_refcount(session);
-out:
+ return ret;
+
+err_ref_msg:
+ nlmsg_free(msg);
+err_ref:
+ l2tp_session_dec_refcount(session);
+err:
return ret;
}
goto out;
}
- session = l2tp_session_find_nth(tunnel, si);
+ session = l2tp_session_get_nth(tunnel, si, false);
if (session == NULL) {
ti++;
tunnel = NULL;
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session) <= 0)
+ session) <= 0) {
+ l2tp_session_dec_refcount(session);
break;
+ }
+ l2tp_session_dec_refcount(session);
si++;
}
int error = 0;
u32 tunnel_id, peer_tunnel_id;
u32 session_id, peer_session_id;
+ bool drop_refcnt = false;
int ver = 2;
int fd;
tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
}
- /* Create session if it doesn't already exist. We handle the
- * case where a session was previously created by the netlink
- * interface by checking that the session doesn't already have
- * a socket and its tunnel socket are what we expect. If any
- * of those checks fail, return EEXIST to the caller.
- */
- session = l2tp_session_find(sock_net(sk), tunnel, session_id);
- if (session == NULL) {
- /* Default MTU must allow space for UDP/L2TP/PPP
- * headers.
+ session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+ if (session) {
+ drop_refcnt = true;
+ ps = l2tp_session_priv(session);
+
+ /* Using a pre-existing session is fine as long as it hasn't
+ * been connected yet.
*/
- cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ if (ps->sock) {
+ error = -EEXIST;
+ goto end;
+ }
- /* Allocate and initialize a new session context. */
- session = l2tp_session_create(sizeof(struct pppol2tp_session),
- tunnel, session_id,
- peer_session_id, &cfg);
- if (session == NULL) {
- error = -ENOMEM;
+ /* consistency checks */
+ if (ps->tunnel_sock != tunnel->sock) {
+ error = -EEXIST;
goto end;
}
} else {
- ps = l2tp_session_priv(session);
- error = -EEXIST;
- if (ps->sock != NULL)
- goto end;
+ /* Default MTU must allow space for UDP/L2TP/PPP headers */
+ cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+ cfg.mru = cfg.mtu;
- /* consistency checks */
- if (ps->tunnel_sock != tunnel->sock)
+ session = l2tp_session_create(sizeof(struct pppol2tp_session),
+ tunnel, session_id,
+ peer_session_id, &cfg);
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto end;
+ }
}
/* Associate session with its PPPoL2TP socket */
"%s: created\n", session->name);
end:
+ if (drop_refcnt)
+ l2tp_session_dec_refcount(session);
release_sock(sk);
return error;
if (tunnel->sock == NULL)
goto out;
- /* Check that this session doesn't already exist */
- error = -EEXIST;
- session = l2tp_session_find(net, tunnel, session_id);
- if (session != NULL)
- goto out;
-
/* Default MTU values. */
if (cfg->mtu == 0)
cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
cfg->mru = cfg->mtu;
/* Allocate and initialize a new session context. */
- error = -ENOMEM;
session = l2tp_session_create(sizeof(struct pppol2tp_session),
tunnel, session_id,
peer_session_id, cfg);
- if (session == NULL)
+ if (IS_ERR(session)) {
+ error = PTR_ERR(session);
goto out;
+ }
ps = l2tp_session_priv(session);
ps->tunnel_sock = tunnel->sock;
if (stats.session_id != 0) {
/* resend to session ioctl handler */
struct l2tp_session *session =
- l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
- if (session != NULL)
- err = pppol2tp_session_ioctl(session, cmd, arg);
- else
+ l2tp_session_get(sock_net(sk), tunnel,
+ stats.session_id, true);
+
+ if (session) {
+ err = pppol2tp_session_ioctl(session, cmd,
+ arg);
+ if (session->deref)
+ session->deref(session);
+ l2tp_session_dec_refcount(session);
+ } else {
err = -EBADR;
+ }
break;
}
#ifdef CONFIG_XFRM
} else
err = pppol2tp_session_setsockopt(sk, session, optname, val);
- err = 0;
-
end_put_sess:
sock_put(sk);
end:
err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
sock_put(ps->tunnel_sock);
- } else
+ if (err)
+ goto end_put_sess;
+ } else {
err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+ if (err)
+ goto end_put_sess;
+ }
err = -EFAULT;
if (put_user(len, (int __user *) optlen))
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
/* Show the tunnel or session context.
*/
- if (pd->session == NULL)
+ if (!pd->session) {
pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
pppol2tp_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
* another trick required to cope with how the PROCOM state
* machine works. -acme
*/
+ skb_orphan(skb);
+ sock_hold(sk);
skb->sk = sk;
+ skb->destructor = sock_efree;
}
if (!sock_owned_by_user(sk))
llc_conn_rcv(sk, skb);
ev->type = LLC_SAP_EV_TYPE_PDU;
ev->reason = 0;
+ skb_orphan(skb);
+ sock_hold(sk);
skb->sk = sk;
+ skb->destructor = sock_efree;
llc_sap_state_process(sap, skb);
}
/* fast-forward to vendor IEs */
offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
- if (offset) {
+ if (offset < ifmsh->ie_len) {
len = ifmsh->ie_len - offset;
data = ifmsh->ie + offset;
if (skb_tailroom(skb) < len)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- if (ieee80211_has_a4(hdr->frame_control) &&
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)
- return RX_DROP_UNUSABLE;
+ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+ switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (!rx->sdata->u.vlan.sta)
+ return RX_DROP_UNUSABLE;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sdata->u.mgd.use_4addr)
+ return RX_DROP_UNUSABLE;
+ break;
+ default:
+ return RX_DROP_UNUSABLE;
+ }
+ }
- if (is_multicast_ether_addr(hdr->addr1) &&
- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- rx->sdata->u.vlan.sta) ||
- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- rx->sdata->u.mgd.use_4addr)))
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
skb->dev = dev;
sdata->vif.p2p))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+ } else {
+ /*
+ * 802.11-2016 Table 9-26 says that for data frames,
+ * A1 must be the BSSID - we've checked that already
+ * but may have accepted the wildcard
+ * (ff:ff:ff:ff:ff:ff).
+ *
+ * It also says:
+ * The BSSID of the Data frame is determined as
+ * follows:
+ * a) If the STA is contained within an AP or is
+ * associated with an AP, the BSSID is the
+ * address currently in use by the STA
+ * contained in the AP.
+ *
+ * So we should not accept data frames with an address
+ * that's multicast.
+ *
+ * Accepting it also opens a security problem because
+ * stations could encrypt it with the GTK and inject
+ * traffic that way.
+ */
+ if (ieee80211_is_data(hdr->frame_control) && multicast)
+ return 0;
}
break;
case NL80211_IFTYPE_WDS:
* least once for the stats anyway.
*/
rcu_read_lock_bh();
+ begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (ct != ignored_conntrack &&
}
NF_CT_STAT_INC(net, searched);
}
+
+ if (get_nulls_value(n) != hash) {
+ NF_CT_STAT_INC(net, search_restart);
+ goto begin;
+ }
+
rcu_read_unlock_bh();
return 0;
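The restart added above is the standard hlist_nulls idiom: with SLAB_DESTROY_BY_RCU an entry can be freed and reused on a different chain mid-walk, so if the nulls marker reached at the end does not name the bucket the walk started in, the traversal may have wandered and is redone. A self-contained sketch (struct obj and the key match are illustrative):

#include <linux/rculist_nulls.h>
#include <linux/types.h>

struct obj {
        u32 key;
        struct hlist_nulls_node node;
};

static struct obj *lookup(struct hlist_nulls_head *table, u32 hash)
{
        struct obj *o;
        struct hlist_nulls_node *n;

        rcu_read_lock();
begin:
        hlist_nulls_for_each_entry_rcu(o, n, &table[hash], node)
                if (o->key == hash) {
                        rcu_read_unlock();
                        return o;
                }
        if (get_nulls_value(n) != hash) /* walked onto another chain */
                goto begin;
        rcu_read_unlock();
        return NULL;
}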
handler = &sip_handlers[i];
if (handler->request == NULL)
continue;
- if (*datalen < handler->len ||
+ if (*datalen < handler->len + 2 ||
strnicmp(*dptr, handler->method, handler->len))
continue;
+ if ((*dptr)[handler->len] != ' ' ||
+ !isalpha((*dptr)[handler->len+1]))
+ continue;
if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
&matchoff, &matchlen) <= 0)
char *pkblk_start;
char *pkblk_end;
int kblk_size;
+ unsigned int max_frame_len;
unsigned int knum_blocks;
uint64_t knxt_seq_num;
char *prev;
p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+ p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
prb_init_ft_ops(p1, req_u);
prb_setup_retire_blk_timer(po, tx_ring);
prb_open_block(p1, pbd);
f->arr[f->num_members] = sk;
smp_wmb();
f->num_members++;
+ if (f->num_members == 1)
+ dev_add_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
BUG_ON(i >= f->num_members);
f->arr[i] = f->arr[f->num_members - 1];
f->num_members--;
+ if (f->num_members == 0)
+ __dev_remove_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
return -EINVAL;
}
+ mutex_lock(&fanout_mutex);
+
+ err = -EINVAL;
if (!po->running)
- return -EINVAL;
+ goto out;
+ err = -EALREADY;
if (po->fanout)
- return -EALREADY;
+ goto out;
- mutex_lock(&fanout_mutex);
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == id &&
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.id_match = match_fanout_group;
- dev_add_pack(&match->prot_hook);
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
return err;
}
-static void fanout_release(struct sock *sk)
+/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
+ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
+ * It is the responsibility of the caller to call fanout_release_data() and
+ * free the returned packet_fanout (after synchronize_net())
+ */
+static struct packet_fanout *fanout_release(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f;
+ mutex_lock(&fanout_mutex);
f = po->fanout;
- if (!f)
- return;
-
- po->fanout = NULL;
+ if (f) {
+ po->fanout = NULL;
- mutex_lock(&fanout_mutex);
- if (atomic_dec_and_test(&f->sk_ref)) {
- list_del(&f->list);
- dev_remove_pack(&f->prot_hook);
- kfree(f);
+ if (atomic_dec_and_test(&f->sk_ref))
+ list_del(&f->list);
+ else
+ f = NULL;
}
mutex_unlock(&fanout_mutex);
+
+ return f;
}
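The comment puts the burden on the caller, and the packet_release() hunk below shows the required order; restated as a fragment (note that fanout_release_data() named in the comment is a helper from newer kernels, and this 3.2 backport only needs kfree()):

        f = fanout_release(sk); /* unlinked under fanout_mutex if last ref */
        synchronize_net();      /* wait out any packet_rcv_fanout() in flight */
        kfree(f);               /* may be NULL; kfree(NULL) is a no-op */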
static const struct proto_ops packet_ops;
if ((int)snaplen < 0)
snaplen = 0;
}
+ } else if (unlikely(macoff + snaplen >
+ GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
+ u32 nval;
+
+ nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
+ pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
+ snaplen, nval, macoff);
+ snaplen = nval;
+ if (unlikely((int)snaplen < 0)) {
+ snaplen = 0;
+ macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+ }
}
spin_lock(&sk->sk_receive_queue.lock);
h.raw = packet_current_rx_frame(po, skb,
{
struct sock *sk = sock->sk;
struct packet_sock *po;
+ struct packet_fanout *f;
struct net *net;
union tpacket_req_u req_u;
packet_set_ring(sk, &req_u, 1, 1);
}
- fanout_release(sk);
+ f = fanout_release(sk);
synchronize_net();
+
+ kfree(f);
+
/*
* Now the socket is dead. No more input will appear.
*/
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
- po->tp_version = val;
- return 0;
+ break;
default:
return -EINVAL;
}
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_version = val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_RESERVE:
{
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ if (val > INT_MAX)
+ return -EINVAL;
po->tp_reserve = val;
return 0;
}
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
+ lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
goto out;
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
goto out;
+ if (po->tp_version >= TPACKET_V3 &&
+ req->tp_block_size <=
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+ goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
goto out;
rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
if (unlikely(rb->frames_per_block <= 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
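Two of the new ring checks are pure arithmetic guards. The UINT_MAX / tp_block_nr test rejects a tp_block_size * tp_block_nr product that would wrap; dividing instead of multiplying keeps the check itself overflow-free. A sketch with a worked example:

#include <limits.h>

/* True iff a * b fits in unsigned int, without doing the multiply. */
static int mul_fits_uint(unsigned int a, unsigned int b)
{
        return b == 0 || a <= UINT_MAX / b;
}

/* Example: a = b = 0x10000. The product is 2^32, which wraps to 0,
 * but UINT_MAX / b == 0xFFFF < a, so the check correctly refuses. */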
goto out;
}
- lock_sock(sk);
/* Detach socket from network */
spin_lock(&po->bind_lock);
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
}
- release_sock(sk);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
+ release_sock(sk);
return err;
}
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (sock_queue_rcv_skb(sk, skb) == 0) {
+ if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ __sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
unsigned *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, n_parts, loop, tmp;
+ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
/* there must be at least one name, and at least #names+1 length
* words */
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->name_parts[loop])
return -ENOMEM;
memcpy(princ->name_parts[loop], xdr, tmp);
princ->name_parts[loop][tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
if (toklen < 4)
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->realm)
return -ENOMEM;
memcpy(princ->realm, xdr, tmp);
princ->realm[tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
_debug("%s/...@%s", princ->name_parts[0], princ->realm);
unsigned *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one tag and one length word */
if (toklen <= 8)
toklen -= 8;
if (len > max_data_size)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
td->data_len = len;
if (len > 0) {
if (!td->data)
return -ENOMEM;
memcpy(td->data, xdr, len);
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
_debug("tag %x len %x", td->tag, td->data_len);
const __be32 **_xdr, unsigned *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one length word */
if (toklen <= 4)
toklen -= 4;
if (len > AFSTOKEN_K5_TIX_MAX)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
*_tktlen = len;
_debug("ticket len %u", len);
if (!*_ticket)
return -ENOMEM;
memcpy(*_ticket, xdr, len);
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
*_xdr = xdr;
{
const __be32 *xdr = data, *token;
const char *cp;
- unsigned len, tmp, loop, ntoken, toklen, sec_ix;
+ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
int ret;
_enter(",{%x,%x,%x,%x},%zu",
if (len < 1 || len > AFSTOKEN_CELL_MAX)
goto not_xdr;
datalen -= 4;
- tmp = (len + 3) & ~3;
- if (tmp > datalen)
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > datalen)
goto not_xdr;
cp = (const char *) xdr;
for (loop = 0; loop < len; loop++)
if (!isprint(cp[loop]))
goto not_xdr;
- if (len < tmp)
- for (; loop < tmp; loop++)
- if (cp[loop])
- goto not_xdr;
+ for (; loop < paddedlen; loop++)
+ if (cp[loop])
+ goto not_xdr;
_debug("cellname: [%u/%u] '%*.*s'",
- len, tmp, len, len, (const char *) xdr);
- datalen -= tmp;
- xdr += tmp >> 2;
+ len, paddedlen, len, len, (const char *) xdr);
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
/* get the token count */
if (datalen < 12)
sec_ix = ntohl(*xdr);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
- if (toklen < 20 || toklen > datalen)
+ paddedlen = (toklen + 3) & ~3;
+ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
goto not_xdr;
- datalen -= (toklen + 3) & ~3;
- xdr += (toklen + 3) >> 2;
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
} while (--loop > 0);
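All of these rxkad hunks fix the same shape of bug: XDR opaques are padded to a 4-byte boundary, and the old code bounds-checked the raw length but consumed the padded one, walking past the buffer whenever the padding straddled the end. The corrected consume step as a self-contained sketch, with a worked example:

#include <errno.h>

/* Consume a counted, 4-byte padded XDR opaque from a buffer. */
static int xdr_consume(const unsigned char **p, unsigned int *remain,
                       unsigned int len)
{
        unsigned int paddedlen = (len + 3) & ~3u;       /* round up to 4 */

        if (paddedlen < len || paddedlen > *remain)     /* wrap or overrun */
                return -EINVAL;
        *p += paddedlen;
        *remain -= paddedlen;
        return 0;
}

/* len = 5 gives paddedlen = 8: with remain = 6 the old "len <= remain"
 * test passed, yet 8 bytes were consumed; the padded test rejects it. */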
goto nla_put_failure;
err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
- if (err < 0)
+ if (err <= 0)
goto nla_put_failure;
- if (err == 0)
- goto noflush_out;
nla_nest_end(skb, nest);
nlmsg_failure:
module_put(a->ops->owner);
err_out:
-noflush_out:
kfree_skb(skb);
kfree(a);
return err;
int hl = ihl + jhl;
if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
- (skb_cloned(skb) &&
- !skb_clone_writable(skb, hl + ntkoff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ skb_try_make_writable(skb, hl + ntkoff))
return NULL;
else
return (void *)(skb_network_header(skb) + ihl);
}
if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
goto fail;
ip_send_check(iph);
addr = iph->daddr;
if (!((old_addr ^ addr) & mask)) {
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, sizeof(*iph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_try_make_writable(skb, sizeof(*iph) + noff))
goto drop;
new_addr &= mask;
struct tcphdr *tcph;
if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
- (skb_cloned(skb) &&
- !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
goto drop;
tcph = (void *)(skb_network_header(skb) + ihl);
struct udphdr *udph;
if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
- (skb_cloned(skb) &&
- !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
goto drop;
udph = (void *)(skb_network_header(skb) + ihl);
if ((old_addr ^ addr) & mask)
break;
- if (skb_cloned(skb) &&
- !skb_clone_writable(skb, ihl + sizeof(*icmph) +
- sizeof(*iph) + noff) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
+ sizeof(*iph) + noff))
goto drop;
icmph = (void *)(skb_network_header(skb) + ihl);
return 0;
}
+static bool offset_valid(struct sk_buff *skb, int offset)
+{
+ if (offset > 0 && offset > skb->len)
+ return false;
+
+ if (offset < 0 && -offset > skb_headroom(skb))
+ return false;
+
+ return true;
+}
+
static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
if (tkey->offmask) {
char *d, _d;
+ if (!offset_valid(skb, off + tkey->at)) {
+ pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+ off + tkey->at);
+ goto bad;
+ }
d = skb_header_pointer(skb, off + tkey->at, 1,
&_d);
if (!d)
" offset must be on 32 bit boundaries\n");
goto bad;
}
- if (offset > 0 && offset > skb->len) {
- pr_info("tc filter pedit"
- " offset %d can't exceed pkt length %d\n",
- offset, skb->len);
+
+ if (!offset_valid(skb, off + offset)) {
+ pr_info("tc filter pedit offset %d out of bounds\n",
+ offset);
goto bad;
}
unsigned long cl;
unsigned long fh;
int err;
- int tp_created = 0;
+ int tp_created;
replay:
+ tp_created = 0;
+
t = NLMSG_DATA(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
{
unsigned short tag;
- tag = vlan_tx_tag_get(skb);
- if (!tag && __vlan_get_tag(skb, &tag))
- *err = -1;
- else
+ if (vlan_tx_tag_present(skb))
+ dst->value = vlan_tx_tag_get(skb);
+ else if (!__vlan_get_tag(skb, &tag))
dst->value = tag;
+ else
+ *err = -1;
}
pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
if (p->set_tc_index) {
+ int wlen = skb_network_offset(skb);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (skb_cow_head(skb, sizeof(struct iphdr)))
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;
skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
break;
case htons(ETH_P_IPV6):
- if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
goto drop;
skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
return err;
}
+ sch->qstats.backlog += qdisc_pkt_len(skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
return NULL;
qdisc_bstats_update(sch, skb);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
sch->q.qlen--;
index = skb->tc_index & (p->indices - 1);
pr_debug("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
qdisc_reset(p->q);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
htb_activate(q, cl);
}
+ sch->qstats.backlog += qdisc_pkt_len(skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
ok:
qdisc_bstats_update(sch, skb);
qdisc_unthrottled(sch);
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
sch->q.qlen--;
return skb;
}
unsigned int len;
if (cl->un.leaf.q->ops->drop &&
(len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
+ sch->qstats.backlog -= len;
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
-
}
}
qdisc_watchdog_cancel(&q->watchdog);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
+ sch->qstats.backlog = 0;
memset(q->row, 0, sizeof(q->row));
memset(q->row_mask, 0, sizeof(q->row_mask));
memset(q->wait_pq, 0, sizeof(q->wait_pq));
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
rcu_read_lock();
opt = rcu_dereference(np->opt);
* This way the whole message is queued up and bundling if
* encouraged for small fragments.
*/
-static int sctp_cmd_send_msg(struct sctp_association *asoc,
- struct sctp_datamsg *msg)
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg)
{
struct sctp_chunk *chunk;
- int error = 0;
-
- list_for_each_entry(chunk, &msg->chunks, frag_list) {
- error = sctp_outq_tail(&asoc->outqueue, chunk);
- if (error)
- break;
- }
- return error;
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_outq_tail(&asoc->outqueue, chunk);
}
sctp_outq_cork(&asoc->outqueue);
local_cork = 1;
}
- error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
+ sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(ep, asoc, type, arg,
+ commands);
+
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
}
}
- /* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- return sctp_sf_violation_chunklen(ep, asoc, type, arg,
- commands);
-
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
- err = sctp_wait_for_connect(asoc, &timeo);
- if ((err == 0 || err == -EINPROGRESS) && assoc_id)
+ if (assoc_id)
*assoc_id = asoc->assoc_id;
+ err = sctp_wait_for_connect(asoc, &timeo);
+ /* Note: the asoc may be freed after the return of
+ * sctp_wait_for_connect.
+ */
/* Don't free association on exit. */
asoc = NULL;
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len <= 0)
+ if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
struct sctp_af *af;
int err = 0;
+ /* If there is a thread waiting on more sndbuf space for
+ * sending on this asoc, it cannot be peeled.
+ */
+ if (waitqueue_active(&asoc->wait))
+ return -EBUSY;
+
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
if (get_user(len, optlen))
return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
sctp_lock_sock(sk);
switch (optname) {
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
- BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
struct msghdr *msg_sys, unsigned flags,
- struct used_address *used_address)
+ struct used_address *used_address, int *residue)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
memcpy(&used_address->name, msg_sys->msg_name,
used_address->name_len);
}
+ if (residue && err >= 0)
+ *residue = total_len - err;
out_freectl:
if (ctl_buf != ctl)
if (!sock)
goto out;
- err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, NULL);
fput_light(sock->file, fput_needed);
out:
struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
struct used_address used_address;
+ int residue;
if (vlen > UIO_MAXIOV)
vlen = UIO_MAXIOV;
while (datagrams < vlen) {
if (MSG_CMSG_COMPAT & flags) {
err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags, &used_address);
+ &msg_sys, flags, &used_address,
+ &residue);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
} else {
err = ___sys_sendmsg(sock,
(struct msghdr __user *)entry,
- &msg_sys, flags, &used_address);
+ &msg_sys, flags, &used_address,
+ &residue);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
if (err)
break;
++datagrams;
+ if (residue)
+ break;
}
fput_light(sock->file, fput_needed);
return err;
err = sock_error(sock->sk);
- if (err)
+ if (err) {
+ datagrams = err;
goto out_put;
+ }
entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;
case RPC_GSS_PROC_DESTROY:
if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
goto auth_err;
- rsci->h.expiry_time = get_seconds();
+ rsci->h.expiry_time = seconds_since_boot();
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
if (resv->iov_len + 4 > PAGE_SIZE)
goto drop;
*/
#define TIPC_MEDIA_TYPE_ETH 1
+/* Message header sizes from msg.h - duplicated to avoid mutual inclusion */
+#define INT_H_SIZE 40
+#define MAX_H_SIZE 60
+
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+
/*
* Destination address structure used by TIPC bearers when sending messages
*
return !b_ptr->media->send_msg(buf, b_ptr, dest);
}
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+ return false;
+ netdev_warn(dev, "MTU too low for tipc bearer\n");
+ return true;
+}
+
#endif /* _TIPC_BEARER_H */
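The arithmetic behind the new floor: MAX_H_SIZE (60, the largest TIPC message header) plus INT_H_SIZE (40, the internal header) gives 100 bytes, so a bearer device must offer at least that MTU plus any encapsulation reserve. Restated (mtu_ok is an illustrative name):

static int mtu_ok(unsigned int mtu, unsigned int reserve)
{
        /* TIPC_MIN_BEARER_MTU = MAX_H_SIZE(60) + INT_H_SIZE(40) = 100 */
        return mtu >= 100 + reserve;
}
/* A stock Ethernet device (mtu 1500, reserve 0) passes comfortably. */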
read_unlock(&dev_base_lock);
if (!dev)
return -ENODEV;
+ if (tipc_mtu_bad(dev, 0)) {
+ dev_put(dev);
+ return -EINVAL;
+ }
/* Create Ethernet bearer for device */
if (!eb_ptr->bearer)
return NOTIFY_DONE; /* bearer had been disabled */
- eb_ptr->bearer->mtu = dev->mtu;
-
switch (evt) {
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
tipc_block_bearer(eb_ptr->bearer->name);
break;
case NETDEV_CHANGEMTU:
+ if (tipc_mtu_bad(dev, 0)) {
+ tipc_disable_bearer(eb_ptr->bearer->name);
+ break;
+ }
+ eb_ptr->bearer->mtu = dev->mtu;
+ /* fall through */
case NETDEV_CHANGEADDR:
tipc_block_bearer(eb_ptr->bearer->name);
tipc_continue(eb_ptr->bearer);
if (s) {
struct unix_sock *u = unix_sk(s);
+ BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
}
list_del(&cursor);
+ /* Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
+ */
+ skb_queue_head_init(&hitlist);
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, inc_inflight, &hitlist);
+
/*
* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
list_move_tail(&u->link, &gc_inflight_list);
}
- /*
- * Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, inc_inflight, &hitlist);
-
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
up = nla_data(rp);
ulen = xfrm_replay_state_esn_len(up);
- if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+ /* Check the overall length and the internal bitmap length to avoid
+ * potential overflow. */
+ if (nla_len(rp) < ulen ||
+ xfrm_replay_state_esn_len(replay_esn) != ulen ||
+ replay_esn->bmp_len != up->bmp_len)
+ return -EINVAL;
+
+ if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
return -EINVAL;
return 0;
WINDOW *prompt_win;
WINDOW *form_win;
PANEL *panel;
- int i, x, y;
+ int i, x, y, lines, columns, win_lines, win_cols;
int res = -1;
int cursor_position = strlen(init);
int cursor_form_win;
char *result = *resultp;
+ getmaxyx(stdscr, lines, columns);
+
if (strlen(init)+1 > *result_len) {
*result_len = strlen(init)+1;
*resultp = result = realloc(result, *result_len);
if (title)
prompt_width = max(prompt_width, strlen(title));
+ win_lines = min(prompt_lines+6, lines-2);
+ win_cols = min(prompt_width+7, columns-2);
+ prompt_lines = max(win_lines-6, 0);
+ prompt_width = max(win_cols-7, 0);
+
/* place dialog in middle of screen */
- y = (LINES-(prompt_lines+4))/2;
- x = (COLS-(prompt_width+4))/2;
+ y = (lines-win_lines)/2;
+ x = (columns-win_cols)/2;
strncpy(result, init, *result_len);
/* create the windows */
- win = newwin(prompt_lines+6, prompt_width+7, y, x);
+ win = newwin(win_lines, win_cols, y, x);
prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
keypad(form_win, TRUE);
static struct key *request_master_key(struct encrypted_key_payload *epayload,
u8 **master_key, size_t *master_keylen)
{
- struct key *mkey = NULL;
+ struct key *mkey = ERR_PTR(-EINVAL);
if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
KEY_TRUSTED_PREFIX_LEN)) {
* immediately unlinked.
*/
struct key_type key_type_dead = {
- .name = "dead",
+ .name = ".dead",
};
/*
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
+ } else if ((description[0] == '.') &&
+ (strncmp(type, "keyring", 7) == 0)) {
+ ret = -EPERM;
+ goto error2;
}
/* pull the payload in if one was supplied */
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
- * be skipped over.
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
ret = PTR_ERR(name);
goto error;
}
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
+error_name:
kfree(name);
-
error:
return ret;
}
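Seen from userspace, these two hunks reserve '.'-prefixed names for the kernel: add_key(2) refuses to create a keyring whose description starts with a dot, and joining such a session keyring fails too. A minimal sketch using libkeyutils (assumes keyutils is installed; the name is arbitrary):

#include <errno.h>
#include <stdio.h>
#include <keyutils.h>

int main(void)
{
        long id = keyctl_join_session_keyring(".reserved");

        if (id == -1 && errno == EPERM)
                printf("dot-prefixed keyring names are rejected\n");
        return 0;
}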
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist. The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
- if (ret < 0) {
- if (ret != -EEXIST)
- goto error;
- ret = 0;
- }
+ if (ret < 0)
+ goto error;
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
if (keyring->type != &key_type_keyring)
goto error;
+ if (!match)
+ return ERR_PTR(-ENOKEY);
+
rcu_read_lock();
now = current_kernel_time();
struct key_type *type,
const char *description)
{
- if (!type->match)
- return ERR_PTR(-ENOKEY);
-
return keyring_search_aux(keyring, current->cred,
type, description, type->match, false);
}
}
/*
- * Install a fresh thread keyring directly to new credentials. This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
+ if (new->thread_keyring)
+ return 0;
+
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
}
/*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
if (!new)
return -ENOMEM;
- BUG_ON(new->thread_keyring);
-
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
}
/*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
*
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
int ret;
if (new->tgcred->process_keyring)
- return -EEXIST;
+ return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid,
new, KEY_ALLOC_QUOTA_OVERRUN, NULL);
}
/*
- * Make sure a process keyring is installed for the current process. The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
*
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ? ret : 0;
+ return ret;
}
return commit_creds(new);
}
/*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
}
/*
- * Install a session keyring, discarding the old one. If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
if (substream->timer_running)
snd_timer_interrupt(substream->timer, 1);
_end:
- snd_pcm_stream_unlock_irqrestore(substream, flags);
if (runtime->transfer_ack_end)
runtime->transfer_ack_end(substream);
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
EXPORT_SYMBOL(snd_pcm_period_elapsed);
info.output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
/* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
snd_seq_pool_done(client->pool);
}
return;
*fifo = NULL;
+ if (f->pool)
+ snd_seq_pool_mark_closing(f->pool);
+
snd_seq_fifo_clear(f);
/* wake up clients if any */
f->tail = cell;
if (f->head == NULL)
f->head = cell;
+ cell->next = NULL;
f->cells++;
spin_unlock_irqrestore(&f->lock, flags);
spin_lock_irqsave(&f->lock, flags);
cell->next = f->head;
f->head = cell;
+ if (!f->tail)
+ f->tail = cell;
f->cells++;
spin_unlock_irqrestore(&f->lock, flags);
}
/* NOTE: overflow flag is not cleared */
spin_unlock_irqrestore(&f->lock, flags);
+ /* close the old pool and wait until all users are gone */
+ snd_seq_pool_mark_closing(oldpool);
+ snd_use_lock_sync(&f->use_lock);
+
/* release cells in old pool */
for (cell = oldhead; cell; cell = next) {
next = cell->next;
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
- int max_count = 5 * HZ;
+ int warn_count = 5 * HZ;
if (atomic_read(lockp) < 0) {
printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
return;
}
while (atomic_read(lockp) > 0) {
- if (max_count == 0) {
- snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
- break;
- }
+ if (warn_count-- == 0)
+ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
schedule_timeout_uninterruptible(1);
- max_count--;
}
}
return 0;
}
+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+ unsigned long flags;
+
+ if (snd_BUG_ON(!pool))
+ return;
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->closing = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
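With the new helper, teardown becomes explicitly phased so no cell can be inserted while the pool drains; the sequencer client hunk earlier in this series uses the order (fragment mirroring that call site):

        snd_seq_pool_mark_closing(client->pool);          /* 1: refuse insertions */
        snd_seq_queue_client_leave_cells(client->number); /* 2: flush queued cells */
        snd_seq_pool_done(client->pool);                  /* 3: wait for users, reclaim */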
/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
unsigned long flags;
struct snd_seq_event_cell *ptr;
- int max_count = 5 * HZ;
if (snd_BUG_ON(!pool))
return -EINVAL;
/* wait for closing all threads */
- spin_lock_irqsave(&pool->lock, flags);
- pool->closing = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
-
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);
- while (atomic_read(&pool->counter) > 0) {
- if (max_count == 0) {
- snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
- break;
- }
+ while (atomic_read(&pool->counter) > 0)
schedule_timeout_uninterruptible(1);
- max_count--;
- }
/* release all resources */
spin_lock_irqsave(&pool->lock, flags);
*ppool = NULL;
if (pool == NULL)
return 0;
+ snd_seq_pool_mark_closing(pool);
snd_seq_pool_done(pool);
kfree(pool);
return 0;
int snd_seq_pool_init(struct snd_seq_pool *pool);
/* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
int snd_seq_pool_done(struct snd_seq_pool *pool);
/* create pool */
}
}
+static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
/* allocate a new queue -
* return queue index value or negative value for error
*/
if (q == NULL)
return -ENOMEM;
q->info_flags = info_flags;
+ queue_use(q, client, 1);
if (queue_list_add(q) < 0) {
queue_delete(q);
return -ENOMEM;
}
- snd_seq_queue_use(q->queue, client, 1); /* use this queue */
return q->queue;
}
return result;
}
-
-/* use or unuse this queue -
- * if it is the first client, starts the timer.
- * if it is not longer used by any clients, stop the timer.
- */
-int snd_seq_queue_use(int queueid, int client, int use)
+/* use or unuse this queue */
+static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
- struct snd_seq_queue *queue;
-
- queue = queueptr(queueid);
- if (queue == NULL)
- return -EINVAL;
- mutex_lock(&queue->timer_mutex);
if (use) {
if (!test_and_set_bit(client, queue->clients_bitmap))
queue->clients++;
} else {
snd_seq_timer_close(queue);
}
+}
+
+/* use or unuse this queue -
+ * if it is the first client, starts the timer.
+ * if it is no longer used by any clients, stop the timer.
+ */
+int snd_seq_queue_use(int queueid, int client, int use)
+{
+ struct snd_seq_queue *queue;
+
+ queue = queueptr(queueid);
+ if (queue == NULL)
+ return -EINVAL;
+ mutex_lock(&queue->timer_mutex);
+ queue_use(queue, client, use);
mutex_unlock(&queue->timer_mutex);
queuefree(queue);
return 0;
if (err < 0)
goto __err;
+ tu->qhead = tu->qtail = tu->qused = 0;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
return -EBADFD;
if (copy_from_user(¶ms, _params, sizeof(params)))
return -EFAULT;
- if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
- err = -EINVAL;
- goto _end;
+ if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
+ u64 resolution;
+
+ if (params.ticks < 1) {
+ err = -EINVAL;
+ goto _end;
+ }
+
+ /* Don't allow resolution less than 1ms */
+ resolution = snd_timer_resolution(tu->timeri);
+ resolution *= params.ticks;
+ if (resolution < 1000000) {
+ err = -EINVAL;
+ goto _end;
+ }
}
if (params.queue_size > 0 &&
(params.queue_size < 32 || params.queue_size > 1024)) {
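The resolution check is plain arithmetic: the requested period is the timer's resolution in nanoseconds times params.ticks, and anything under 1000000 ns (1 ms) is now refused. A sketch with a worked example (ticks_ok is illustrative):

#include <linux/types.h>

static int ticks_ok(u64 resolution_ns, unsigned int ticks)
{
        return ticks >= 1 && resolution_ns * ticks >= 1000000; /* >= 1 ms */
}
/* e.g. a timer with 100 us (100000 ns) resolution needs ticks >= 10. */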
tu = file->private_data;
unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
while ((long)count - result >= unit) {
while (!tu->qused) {
add_wait_queue(&tu->qchange_sleep, &wait);
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
schedule();
+ mutex_lock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
remove_wait_queue(&tu->qchange_sleep, &wait);
tu->qused--;
spin_unlock_irq(&tu->qlock);
- mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
- mutex_unlock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
if (err < 0)
}
_error:
spin_unlock_irq(&tu->qlock);
+ mutex_unlock(&tu->ioctl_lock);
return result > 0 ? result : err;
}
spin_unlock(&codec->reg_lock);
snd_ali_printk("playback pointer returned cso=%xh.\n", cso);
+ cso %= runtime->buffer_size;
return cso;
}
cso = inw(ALI_REG(codec, ALI_CSO_ALPHA_FMS + 2));
spin_unlock(&codec->reg_lock);
+ cso %= runtime->buffer_size;
return cso;
}
#include "cthw20k1.h"
#include "ct20k1reg.h"
-#if BITS_PER_LONG == 32
-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */
-#else
-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */
-#endif
-
struct hw20k1 {
struct hw hw;
spinlock_t reg_20k1_lock;
{
int err;
struct pci_dev *pci = hw->pci;
+ const unsigned int dma_bits = BITS_PER_LONG;
err = pci_enable_device(pci);
if (err < 0)
return err;
/* Set DMA transfer mask */
- if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
- pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
- printk(KERN_ERR "architecture does not support PCI "
- "busmaster DMA with mask 0x%llx\n",
- CT_XFI_DMA_MASK);
- err = -ENXIO;
- goto error1;
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
+ } else {
+ dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
}
if (!hw->io_base) {
#include "cthw20k2.h"
#include "ct20k2reg.h"
-#if BITS_PER_LONG == 32
-#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */
-#else
-#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */
-#endif
-
struct hw20k2 {
struct hw hw;
/* for i2c */
int err = 0;
struct pci_dev *pci = hw->pci;
unsigned int gctl;
+ const unsigned int dma_bits = BITS_PER_LONG;
err = pci_enable_device(pci);
if (err < 0)
return err;
/* Set DMA transfer mask */
- if (pci_set_dma_mask(pci, CT_XFI_DMA_MASK) < 0 ||
- pci_set_consistent_dma_mask(pci, CT_XFI_DMA_MASK) < 0) {
- printk(KERN_ERR "ctxfi: architecture does not support PCI "
- "busmaster DMA with mask 0x%llx\n", CT_XFI_DMA_MASK);
- err = -ENXIO;
- goto error1;
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
+ } else {
+ dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
}
if (!hw->io_base) {
if (! snd_usb_parse_audio_interface(chip, interface)) {
usb_set_interface(dev, interface, 0); /* reset the current interface */
usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
- return -EINVAL;
}
return 0;
case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
case USB_ID(0x046d, 0x0991):
+ case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
- * Proboly there is some logitech magic behind this number --fishor
+ * Probably there is some logitech magic behind this number --fishor
*/
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
snd_printk(KERN_INFO
}
static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+ struct usb_mixer_elem_info *cval,
struct snd_kcontrol *kctl)
{
/* Approximation using 10 ranges based on output measurement on hw v1.2.
41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
);
- dev_info(&mixer->chip->dev->dev, "applying DragonFly dB scale quirk\n");
- kctl->tlv.p = scale;
- kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
- kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ if (cval->min == 0 && cval->max == 50) {
+ dev_info(&mixer->chip->dev->dev, "applying DragonFly dB scale quirk (0-50 variant)\n");
+ kctl->tlv.p = scale;
+ kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+ kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+
+ } else if (cval->min == 0 && cval->max <= 1000) {
+ /* Some other clearly broken DragonFly variant.
+ * At least a 0..53 variant (hw v1.0) exists.
+ */
+ dev_info(&mixer->chip->dev->dev, "ignoring too narrow dB range on a DragonFly device\n");
+ kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+ }
}
void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
{
switch (mixer->chip->usb_id) {
case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
- if (unitid == 7 && cval->min == 0 && cval->max == 50)
- snd_dragonfly_quirk_db_scale(mixer, kctl);
+ if (unitid == 7 && cval->control == UAC_FU_VOLUME)
+ snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
break;
}
}
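
Both branches of the quirk work by flipping the control's TLV access bits:
the first points the control at a static table and sets
SNDRV_CTL_ELEM_ACCESS_TLV_READ so userspace reads that table, while the
second merely clears SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK so the core stops
querying the (broken) device for dB data. A minimal kernel-style sketch of
the first mechanism, using an assumed -53.00..0 dB range:

	#include <sound/control.h>
	#include <sound/tlv.h>

	/* hypothetical range; real quirks measure it on hardware */
	static const DECLARE_TLV_DB_MINMAX(db_scale_fixed, -5300, 0);

	static void apply_static_db_scale(struct snd_kcontrol *kctl)
	{
		kctl->tlv.p = db_scale_fixed;
		kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
		kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
	}
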
AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+/* Syntek STK1160 */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x05e1,
+ .idProduct = 0x0408,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Syntek",
+ .product_name = "STK1160",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER
+ }
+},
+
/* Digidesign Mbox */
{
/* Thanks to Clemens Ladisch <clemens@ladisch.de> */
'perf script report <script> [args]' to run and display the results
of <script>. <script> is the name displayed in the output of 'perf
- trace --list' i.e. the actual script name minus any language
+ script --list' i.e. the actual script name minus any language
extension. The perf.data output from a previous run of 'perf script
record <script>' is used and should be present for this command to
succeed. [args] refers to the (mainly optional) args expected by
Any command you can specify in a shell.
-D::
---dump-raw-script=::
+--dump-raw-trace=::
Display verbose dump of the trace data.
-L::
/* Last entry */
if (curr->end == curr->start)
- curr->end = roundup(curr->start, 4096);
+ curr->end = roundup(curr->start, 4096) + 4096;
}
static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
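
The extra + 4096 matters because roundup() is a no-op when the address is
already page-aligned, which previously left the last symbol with a zero-length
map (curr->end == curr->start) that could never match an address. A standalone
check of the arithmetic:

	#include <stdio.h>

	/* mirrors the kernel's roundup(): round x up to a multiple of y */
	#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

	int main(void)
	{
		unsigned long start = 0x401000;	/* already page-aligned */

		printf("%#lx\n", roundup(start, 4096));		/* 0x401000: empty */
		printf("%#lx\n", roundup(start, 4096) + 4096);	/* 0x402000: 1 page */
		return 0;
	}
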
if (err)
die("error registering py script extension");
- scripting_context = malloc(sizeof(struct scripting_context));
+ if (scripting_context == NULL)
+ scripting_context = malloc(sizeof(*scripting_context));
}
#ifdef NO_LIBPYTHON
if (err)
die("error registering pl script extension");
- scripting_context = malloc(sizeof(struct scripting_context));
+ if (scripting_context == NULL)
+ scripting_context = malloc(sizeof(*scripting_context));
}
#ifdef NO_LIBPERL
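
Both the Perl and the Python registration paths run this allocation, so the
unconditional malloc() leaked the first context whenever both script
languages were compiled in. The NULL check makes the setup idempotent; a
self-contained sketch (the struct body is a stand-in):

	#include <stdlib.h>

	struct scripting_context { void *event_data; };	/* stand-in */

	static struct scripting_context *scripting_context;

	/* called once per registered language; must not leak on the
	 * second call, hence the guard */
	static void setup_scripting_context(void)
	{
		if (scripting_context == NULL)
			scripting_context = malloc(sizeof(*scripting_context));
	}

	int main(void)
	{
		setup_scripting_context();	/* e.g. Python path */
		setup_scripting_context();	/* e.g. Perl path: reuses it */
		free(scripting_context);
		return 0;
	}
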
*/
case 0x2C: /* Westmere EP - Gulftown */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+ break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;