F: drivers/gpio/
F: include/linux/gpio*
+GRE DEMULTIPLEXER DRIVER
+M: Dmitry Kozlov <xeb@mail.ru>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/ipv4/gre.c
+F: include/net/gre.h
+
GRETH 10/100/1G Ethernet MAC device driver
M: Kristoffer Glembo <kristoffer@gaisler.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/greth*
-HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
-M: Frank Seidel <frank@f-seidel.de>
-L: platform-driver-x86@vger.kernel.org
-W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
-S: Maintained
-F: drivers/platform/x86/hdaps.c
-
-HWPOISON MEMORY FAILURE HANDLING
-M: Andi Kleen <andi@firstfloor.org>
-L: linux-mm@kvack.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
-S: Maintained
-F: mm/memory-failure.c
-F: mm/hwpoison-inject.c
-
-HYPERVISOR VIRTUAL CONSOLE DRIVER
-L: linuxppc-dev@lists.ozlabs.org
-S: Odd Fixes
-F: drivers/tty/hvc/
-
-iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
-M: Peter Jones <pjones@redhat.com>
-M: Konrad Rzeszutek Wilk <konrad@kernel.org>
-S: Maintained
-F: drivers/firmware/iscsi_ibft*
-
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/video/gspca/
+HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
+M: Frank Seidel <frank@f-seidel.de>
+L: platform-driver-x86@vger.kernel.org
+W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
+S: Maintained
+F: drivers/platform/x86/hdaps.c
+
+HWPOISON MEMORY FAILURE HANDLING
+M: Andi Kleen <andi@firstfloor.org>
+L: linux-mm@kvack.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
+S: Maintained
+F: mm/memory-failure.c
+F: mm/hwpoison-inject.c
+
+HYPERVISOR VIRTUAL CONSOLE DRIVER
+L: linuxppc-dev@lists.ozlabs.org
+S: Odd Fixes
+F: drivers/tty/hvc/
+
HARDWARE MONITORING
M: Jean Delvare <khali@linux-fr.org>
M: Guenter Roeck <guenter.roeck@ericsson.com>
F: drivers/pnp/isapnp/
F: include/linux/isapnp.h
+iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
+M: Peter Jones <pjones@redhat.com>
+M: Konrad Rzeszutek Wilk <konrad@kernel.org>
+S: Maintained
+F: drivers/firmware/iscsi_ibft*
+
ISCSI
M: Mike Christie <michaelc@cs.wisc.edu>
L: open-iscsi@googlegroups.com
F: drivers/pps/
F: include/linux/pps*.h
+PPTP DRIVER
+M: Dmitry Kozlov <xeb@mail.ru>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/pptp.c
+W: http://sourceforge.net/projects/accel-pptp
+
PREEMPTIBLE KERNEL
M: Robert Love <rml@tech9.net>
L: kpreempt-tech@lists.sourceforge.net
S: Maintained
F: drivers/tty/serial/zs.*
-GRE DEMULTIPLEXER DRIVER
-M: Dmitry Kozlov <xeb@mail.ru>
-L: netdev@vger.kernel.org
-S: Maintained
-F: net/ipv4/gre.c
-F: include/net/gre.h
-
-PPTP DRIVER
-M: Dmitry Kozlov <xeb@mail.ru>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/pptp.c
-W: http://sourceforge.net/projects/accel-pptp
-
THE REST
M: Linus Torvalds <torvalds@linux-foundation.org>
L: linux-kernel@vger.kernel.org
#define __NR_fanotify_init 494
#define __NR_fanotify_mark 495
#define __NR_prlimit64 496
+#define __NR_name_to_handle_at 497
+#define __NR_open_by_handle_at 498
+#define __NR_clock_adjtime 499
+#define __NR_syncfs 500
#ifdef __KERNEL__
-#define NR_SYSCALLS 497
+#define NR_SYSCALLS 501
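NR_SYSCALLS stays one past the highest wired-up syscall number: with __NR_syncfs = 500, the table spans entries 0 through 500, hence 501. A hypothetical compile-time guard for that invariant (not part of the patch) would be:

	#if NR_SYSCALLS != __NR_syncfs + 1
	#error NR_SYSCALLS is out of sync with the highest syscall number
	#endif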
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
.quad sys_ni_syscall /* sys_timerfd */
.quad sys_eventfd
.quad sys_recvmmsg
- .quad sys_fallocate /* 480 */
+ .quad sys_fallocate /* 480 */
.quad sys_timerfd_create
.quad sys_timerfd_settime
.quad sys_timerfd_gettime
.quad sys_signalfd4
- .quad sys_eventfd2 /* 485 */
+ .quad sys_eventfd2 /* 485 */
.quad sys_epoll_create1
.quad sys_dup3
.quad sys_pipe2
.quad sys_inotify_init1
- .quad sys_preadv /* 490 */
+ .quad sys_preadv /* 490 */
.quad sys_pwritev
.quad sys_rt_tgsigqueueinfo
.quad sys_perf_event_open
.quad sys_fanotify_init
- .quad sys_fanotify_mark /* 495 */
+ .quad sys_fanotify_mark /* 495 */
.quad sys_prlimit64
+ .quad sys_name_to_handle_at
+ .quad sys_open_by_handle_at
+ .quad sys_clock_adjtime
+ .quad sys_syncfs /* 500 */
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
static inline void register_rpcc_clocksource(long cycle_freq)
{
- clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
- clocksource_register(&clocksource_rpcc);
+ clocksource_register_hz(&clocksource_rpcc, cycle_freq);
}
#else /* !CONFIG_SMP */
static inline void register_rpcc_clocksource(long cycle_freq)
ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS)
else
ZTEXTADDR := 0
-ZBSSADDR := ALIGN(4)
+ZBSSADDR := ALIGN(8)
endif
SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
bl cache_on
restart: adr r0, LC0
- ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
- ldr sp, [r0, #32]
+ ldmia r0, {r1, r2, r3, r6, r9, r11, r12}
+ ldr sp, [r0, #28]
/*
* We might be running at a different address. We need
* to fix up various pointers.
*/
sub r0, r0, r1 @ calculate the delta offset
- add r5, r5, r0 @ _start
add r6, r6, r0 @ _edata
#ifndef CONFIG_ZBOOT_ROM
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
- * r5 = start of this image
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
- * r4 >= r10 -> OK
- * r4 + image length <= r5 -> OK
+ * r4 - 16k page directory >= r10 -> OK
+ * r4 + image length <= current position (pc) -> OK
*/
+ add r10, r10, #16384
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
- cmp r10, r5
+ ARM( cmp r10, pc )
+ THUMB( mov lr, pc )
+ THUMB( cmp r10, lr )
bls wont_overwrite
/*
* Relocate ourselves past the end of the decompressed kernel.
- * r5 = start of this image
* r6 = _edata
* r10 = end of the decompressed kernel
* Because we always copy ahead, we need to do it from the end and go
* backward in case the source and destination overlap.
*/
- /* Round up to next 256-byte boundary. */
- add r10, r10, #256
+ /*
+ * Bump to the next 256-byte boundary with the size of
+ * the relocation code added. This avoids overwriting
+ * ourselves when the offset is small.
+ */
+ add r10, r10, #((reloc_code_end - restart + 256) & ~255)
bic r10, r10, #255
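+ @ Illustrative arithmetic (not part of the patch): if
+ @ reloc_code_end - restart = 0x2c0, r10 first advances by
+ @ (0x2c0 + 256) & ~255 = 0x300 and the bic then rounds it down
+ @ to a 256-byte boundary, so the relocated copy can never land
+ @ on top of the code performing the copy.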
+ /* Get start of code we want to copy and align it down. */
+ adr r5, restart
+ bic r5, r5, #31
+
sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
/* Preserve offset to relocated code. */
sub r6, r9, r6
+#ifndef CONFIG_ZBOOT_ROM
+ /* cache_clean_flush may use the stack, so relocate it */
+ add sp, sp, r6
+#endif
+
bl cache_clean_flush
adr r0, BSYM(restart)
LC0: .word LC0 @ r1
.word __bss_start @ r2
.word _end @ r3
- .word _start @ r5
.word _edata @ r6
.word _image_size @ r9
.word _got_start @ r11
#endif
.ltorg
+reloc_code_end:
.align
.section ".stack", "aw", %nobits
.bss : { *(.bss) }
_end = .;
+ . = ALIGN(8); /* the stack must be 64-bit aligned */
.stack : { *(.stack) }
.stab 0 : { *(.stab) }
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0)
-#define rmb() dmb()
+#define rmb() dsb()
#define wmb() mb()
#else
#include <asm/memory.h>
return err;
}
-static inline void setup_syscall_restart(struct pt_regs *regs)
-{
- regs->ARM_r0 = regs->ARM_ORIG_r0;
- regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
-}
-
/*
* OK, we're invoking a handler
*/
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset,
- struct pt_regs * regs, int syscall)
+ struct pt_regs * regs)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = current;
int usig = sig;
int ret;
- /*
- * If we were from a system call, check for system call restarting...
- */
- if (syscall) {
- switch (regs->ARM_r0) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->ARM_r0 = -EINTR;
- break;
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->ARM_r0 = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- setup_syscall_restart(regs);
- }
- }
-
/*
* translate the signal
*/
*/
static void do_signal(struct pt_regs *regs, int syscall)
{
+ unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
struct k_sigaction ka;
siginfo_t info;
int signr;
if (!user_mode(regs))
return;
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (syscall) {
+ continue_addr = regs->ARM_pc;
+ restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
+ retval = regs->ARM_r0;
+
+ /*
+ * Prepare for system call restart. We do this here so that a
+ * debugger will see the already changed PSW.
+ */
+ switch (retval) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+ regs->ARM_pc = restart_addr;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->ARM_r0 = -EINTR;
+ break;
+ }
+ }
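+ /*
+ * Illustrative summary (not part of the patch): from here on,
+ * -ERESTARTNOINTR always restarts; -ERESTARTSYS restarts unless
+ * the delivered handler lacks SA_RESTART (reverted below);
+ * -ERESTARTNOHAND restarts only when no handler runs; and
+ * -ERESTART_RESTARTBLOCK is rerouted to sys_restart_syscall in
+ * the no_signal path.
+ */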
+
if (try_to_freeze())
goto no_signal;
+ /*
+ * Get the signal to deliver. When running under ptrace, at this
+ * point the debugger may change all our registers ...
+ */
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
sigset_t *oldset;
+ /*
+ * Depending on the signal settings we may need to revert the
+ * decision to restart the system call. But skip this if a
+ * debugger has chosen to restart at a different PC.
+ */
+ if (regs->ARM_pc == restart_addr) {
+ if (retval == -ERESTARTNOHAND
+ || (retval == -ERESTARTSYS
+ && !(ka.sa.sa_flags & SA_RESTART))) {
+ regs->ARM_r0 = -EINTR;
+ regs->ARM_pc = continue_addr;
+ }
+ }
+
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
- if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
/*
* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
}
no_signal:
- /*
- * No signal to deliver to the process - restart the syscall.
- */
if (syscall) {
- if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+ /*
+ * Handle restarting a different system call. As above,
+ * if a debugger has chosen to restart at a different PC,
+ * ignore the restart.
+ */
+ if (retval == -ERESTART_RESTARTBLOCK
+ && regs->ARM_pc == continue_addr) {
if (thumb_mode(regs)) {
regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
#endif
}
}
- if (regs->ARM_r0 == -ERESTARTNOHAND ||
- regs->ARM_r0 == -ERESTARTSYS ||
- regs->ARM_r0 == -ERESTARTNOINTR) {
- setup_syscall_restart(regs);
- }
/* If there's no signal to deliver, we just put the saved sigmask
* back.
* operation to deadlock the system.
*/
#define mb() dsb()
-#define rmb() dmb()
+#define rmb() dsb()
#define wmb() mb()
#include <asm/outercache.h>
-#define rmb() dmb()
+#define rmb() dsb()
#define wmb() do { dsb(); outer_sync(); } while (0)
#define mb() wmb()
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn);
+ end_pg = pfn_to_page(end_pfn - 1) + 1;
/*
* Convert to physical addresses, and
bank_start = bank_pfn_start(bank);
+#ifdef CONFIG_SPARSEMEM
+ /*
+ * Take care not to free memmap entries that don't exist
+ * due to SPARSEMEM sections which aren't present.
+ */
+ bank_start = min(bank_start,
+ ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
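+ /*
+ * Worked example (illustrative): with PAGES_PER_SECTION = 0x10000,
+ * prev_bank_end = 0x8000 and the next bank starting at pfn 0x30000,
+ * bank_start is clamped to ALIGN(0x8000, 0x10000) = 0x10000, so only
+ * [0x8000, 0x10000) is freed; the rest of the hole lies in an absent
+ * section whose memmap was never allocated.
+ */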
/*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
}
+
+#ifdef CONFIG_SPARSEMEM
+ if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+ free_memmap(prev_bank_end,
+ ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
}
static void __init free_highpages(void)
#define _ASM_S390_DIAG_H
/*
- * Diagnose 10: Release pages
+ * Diagnose 10: Release page range
*/
-extern void diag10(unsigned long addr);
+static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
+{
+ unsigned long start_addr, end_addr;
+
+ start_addr = start_pfn << PAGE_SHIFT;
+ end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+
+ asm volatile(
+ "0: diag %0,%1,0x10\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ EX_TABLE(1b, 1b)
+ : : "a" (start_addr), "a" (end_addr));
+}
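Note the end-address arithmetic: diagnose 0x10 takes the addresses of the first and last page of the range, so start_pfn = 0x100 with num_pfn = 2 (4 KiB pages) yields 0x100000 and 0x101000, covering both pages inclusively. A minimal caller sketch, matching the cmm driver's call further down:

	/* sketch: return a single page at 'addr' to the hypervisor */
	diag10_range(addr >> PAGE_SHIFT, 1);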
/*
* Diagnose 14: Input spool file manipulation
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- if (current->mm->context.alloc_pgste) {
+ if (current->mm && current->mm->context.alloc_pgste) {
/*
* alloc_pgste indicates, that any NEW context will be created
* with extended page tables. The old context is unchanged. The
#include <linux/module.h>
#include <asm/diag.h>
-/*
- * Diagnose 10: Release pages
- */
-void diag10(unsigned long addr)
-{
- if (addr >= 0x7ff00000)
- return;
- asm volatile(
-#ifdef CONFIG_64BIT
- " sam31\n"
- " diag %0,%0,0x10\n"
- "0: sam64\n"
-#else
- " diag %0,%0,0x10\n"
- "0:\n"
-#endif
- EX_TABLE(0b, 0b)
- : : "a" (addr));
-}
-EXPORT_SYMBOL(diag10);
-
/*
* Diagnose 14: Input spool file manipulation
*/
{ "rp", 0x77, INSTR_S_RD },
{ "stcke", 0x78, INSTR_S_RD },
{ "sacf", 0x79, INSTR_S_RD },
+ { "spp", 0x80, INSTR_S_RD },
{ "stsi", 0x7d, INSTR_S_RD },
{ "srnm", 0x99, INSTR_S_RD },
{ "stfpc", 0x9c, INSTR_S_RD },
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
basr %r14,0
l %r14,restart_addr-.(%r14)
- br %r14 # branch to start_secondary
+ basr %r14,%r14 # branch to start_secondary
restart_addr:
.long start_secondary
.align 8
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
- jg start_secondary
+ brasl %r14,start_secondary
.align 8
restart_vtime:
.long 0x7fffffff,0xffffffff
} else
free_page((unsigned long) npa);
}
- diag10(addr);
+ diag10_range(addr >> PAGE_SHIFT, 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
return rc;
}
-long hwsampler_query_min_interval(void)
+unsigned long hwsampler_query_min_interval(void)
{
- if (min_sampler_rate)
- return min_sampler_rate;
- else
- return -EINVAL;
+ return min_sampler_rate;
}
-long hwsampler_query_max_interval(void)
+unsigned long hwsampler_query_max_interval(void)
{
- if (max_sampler_rate)
- return max_sampler_rate;
- else
- return -EINVAL;
+ return max_sampler_rate;
}
unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
int hwsampler_shutdown(void);
int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
int hwsampler_deallocate(void);
-long hwsampler_query_min_interval(void);
-long hwsampler_query_max_interval(void);
+unsigned long hwsampler_query_min_interval(void);
+unsigned long hwsampler_query_max_interval(void);
int hwsampler_start_all(unsigned long interval);
int hwsampler_stop_all(void);
int hwsampler_deactivate(unsigned int cpu);
* create hwsampler files only if hwsampler_setup() succeeds.
*/
oprofile_min_interval = hwsampler_query_min_interval();
- if (oprofile_min_interval < 0) {
- oprofile_min_interval = 0;
+ if (oprofile_min_interval == 0)
return -ENODEV;
- }
oprofile_max_interval = hwsampler_query_max_interval();
- if (oprofile_max_interval < 0) {
- oprofile_max_interval = 0;
+ if (oprofile_max_interval == 0)
return -ENODEV;
- }
if (oprofile_timer_init(ops))
return -ENODEV;
return 0;
}
-static struct of_device_id __initdata apc_match[] = {
+static struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
return 0;
}
-static struct of_device_id __initdata pmc_match[] = {
+static struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
void __cpuinit smp_store_cpu_info(int id)
{
int cpu_node;
+ int mid;
cpu_data(id).udelay_val = loops_per_jiffy;
cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
- cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+ mid = cpu_get_hwmid(cpu_node);
- if (cpu_data(id).mid < 0)
- panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+ if (mid < 0) {
+ printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08x\n", id, cpu_node);
+ mid = 0;
+ }
+ cpu_data(id).mid = mid;
}
void __init smp_cpus_done(unsigned int max_cpus)
return 0;
}
-static struct of_device_id __initdata clock_match[] = {
+static struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
/* Also, handle the alignment code out of band. */
cc_dword_align:
- cmp %g1, 6
- bl,a ccte
+ cmp %g1, 16
+ bge 1f
+ srl %g1, 1, %o3
+2: cmp %o3, 0
+ be,a ccte
andcc %g1, 0xf, %o3
- andcc %o0, 0x1, %g0
+ andcc %o3, %o0, %g0 ! Check %o0 only (%o1 has the same last 2 bits)
+ be,a 2b
+ srl %o3, 1, %o3
+1: andcc %o0, 0x1, %g0
bne ccslow
andcc %o0, 0x2, %g0
be 1f
/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+extern void native_pagetable_reserve(u64 start, u64 end);
#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
void (*banner)(void);
};
+/**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+ * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
+ *
+ * For more details on the purpose of this hook, look in
+ * init_memory_mapping and the commit that added it.
+ */
+struct x86_init_mapping {
+ void (*pagetable_reserve)(u64 start, u64 end);
+};
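A platform opts in by swapping the function pointer during early setup, exactly as the Xen hunks later in this series do. A minimal sketch, where myplat_pagetable_reserve and myplat_init are hypothetical:

	static void myplat_pagetable_reserve(u64 start, u64 end)
	{
		/* keep the default memblock reservation... */
		native_pagetable_reserve(start, end);
		/* ...then do any platform-specific cleanup of [end, top) */
	}

	static void __init myplat_init(void)
	{
		x86_init.mapping.pagetable_reserve = myplat_pagetable_reserve;
	}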
+
/**
* struct x86_init_paging - platform specific paging functions
* @pagetable_setup_start: platform specific pre paging_init() call
struct x86_init_mpparse mpparse;
struct x86_init_irqs irqs;
struct x86_init_oem oem;
+ struct x86_init_mapping mapping;
struct x86_init_paging paging;
struct x86_init_timers timers;
struct x86_init_iommu iommu;
.banner = default_banner,
},
+ .mapping = {
+ .pagetable_reserve = native_pagetable_reserve,
+ },
+
.paging = {
.pagetable_setup_start = native_pagetable_setup_start,
.pagetable_setup_done = native_pagetable_setup_done,
end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
}
+void __init native_pagetable_reserve(u64 start, u64 end)
+{
+ memblock_x86_reserve_range(start, end, "PGTABLE");
+}
+
struct map_range {
unsigned long start;
unsigned long end;
__flush_tlb_all();
+ /*
+ * Reserve the kernel pagetable pages we used (pgt_buf_start -
+ * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
+ * so that they can be reused for other purposes.
+ *
+ * On native it just means calling memblock_x86_reserve_range; on Xen it
+ * also means marking RW the pagetable pages that we allocated before
+ * but that haven't been used.
+ *
+ * In fact on xen we mark RO the whole range pgt_buf_start -
+ * pgt_buf_top, because we have to make sure that when
+ * init_memory_mapping reaches the pagetable pages area, it maps
+ * RO all the pagetable pages, including the ones that are beyond
+ * pgt_buf_end at that time.
+ */
if (!after_bootmem && pgt_buf_end > pgt_buf_start)
- memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
- pgt_buf_end << PAGE_SHIFT, "PGTABLE");
+ x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
+ PFN_PHYS(pgt_buf_end));
if (!after_bootmem)
early_memtest(start, end);
{
}
+static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
+{
+ /* reserve the range used */
+ native_pagetable_reserve(start, end);
+
+ /* set as RW the rest */
+ printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
+ PFN_PHYS(pgt_buf_top));
+ while (end < PFN_PHYS(pgt_buf_top)) {
+ make_lowmem_page_readwrite(__va(end));
+ end += PAGE_SIZE;
+ }
+}
+
static void xen_post_allocator_init(void);
static __init void xen_pagetable_setup_done(pgd_t *base)
return ret;
}
-#ifdef CONFIG_X86_64
-static __initdata u64 __last_pgt_set_rw = 0;
-static __initdata u64 __pgt_buf_start = 0;
-static __initdata u64 __pgt_buf_end = 0;
-static __initdata u64 __pgt_buf_top = 0;
-/*
- * As a consequence of the commit:
- *
- * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
- * Author: Yinghai Lu <yinghai@kernel.org>
- * Date: Fri Dec 17 16:58:28 2010 -0800
- *
- * x86-64, mm: Put early page table high
- *
- * at some point init_memory_mapping is going to reach the pagetable pages
- * area and map those pages too (mapping them as normal memory that falls
- * in the range of addresses passed to init_memory_mapping as argument).
- * Some of those pages are already pagetable pages (they are in the range
- * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
- * everything is fine.
- * Some of these pages are not pagetable pages yet (they fall in the range
- * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
- * are going to be mapped RW. When these pages become pagetable pages and
- * are hooked into the pagetable, xen will find that the guest has already
- * a RW mapping of them somewhere and fail the operation.
- * The reason Xen requires pagetables to be RO is that the hypervisor needs
- * to verify that the pagetables are valid before using them. The validation
- * operations are called "pinning".
- *
- * In order to fix the issue we mark all the pages in the entire range
- * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
- * is completed only the range pgt_buf_start-pgt_buf_end is reserved by
- * init_memory_mapping. Hence the kernel is going to crash as soon as one
- * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
- * ranges are RO).
- *
- * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
- * the init_memory_mapping has completed (in a perfect world we would
- * call this function from init_memory_mapping, but lets ignore that).
- *
- * Because we are called _after_ init_memory_mapping the pgt_buf_[start,
- * end,top] have all changed to new values (b/c init_memory_mapping
- * is called and setting up another new page-table). Hence, the first time
- * we enter this function, we save away the pgt_buf_start value and update
- * the pgt_buf_[end,top].
- *
- * When we detect that the "old" pgt_buf_start through pgt_buf_end
- * PFNs have been reserved (so memblock_x86_reserve_range has been called),
- * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
- *
- * And then we update those "old" pgt_buf_[end|top] with the new ones
- * so that we can redo this on the next pagetable.
- */
-static __init void mark_rw_past_pgt(void) {
-
- if (pgt_buf_end > pgt_buf_start) {
- u64 addr, size;
-
- /* Save it away. */
- if (!__pgt_buf_start) {
- __pgt_buf_start = pgt_buf_start;
- __pgt_buf_end = pgt_buf_end;
- __pgt_buf_top = pgt_buf_top;
- return;
- }
- /* If we get the range that starts at __pgt_buf_end that means
- * the range is reserved, and that in 'init_memory_mapping'
- * the 'memblock_x86_reserve_range' has been called with the
- * outdated __pgt_buf_start, __pgt_buf_end (the "new"
- * pgt_buf_[start|end|top] refer now to a new pagetable.
- * Note: we are called _after_ the pgt_buf_[..] have been
- * updated.*/
-
- addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
- &size, PAGE_SIZE);
-
- /* Still not reserved, meaning 'memblock_x86_reserve_range'
- * hasn't been called yet. Update the _end and _top.*/
- if (addr == PFN_PHYS(__pgt_buf_start)) {
- __pgt_buf_end = pgt_buf_end;
- __pgt_buf_top = pgt_buf_top;
- return;
- }
-
- /* OK, the area is reserved, meaning it is time for us to
- * set RW for the old end->top PFNs. */
-
- /* ..unless we had already done this. */
- if (__pgt_buf_end == __last_pgt_set_rw)
- return;
-
- addr = PFN_PHYS(__pgt_buf_end);
-
- /* set as RW the rest */
- printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
- PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
-
- while (addr < PFN_PHYS(__pgt_buf_top)) {
- make_lowmem_page_readwrite(__va(addr));
- addr += PAGE_SIZE;
- }
- /* And update everything so that we are ready for the next
- * pagetable (the one created for regions past 4GB) */
- __last_pgt_set_rw = __pgt_buf_end;
- __pgt_buf_start = pgt_buf_start;
- __pgt_buf_end = pgt_buf_end;
- __pgt_buf_top = pgt_buf_top;
- }
- return;
-}
-#else
-static __init void mark_rw_past_pgt(void) { }
-#endif
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
{
unsigned long pfn = pte_pfn(pte);
- /*
- * A bit of optimization. We do not need to call the workaround
- * when xen_set_pte_init is called with a PTE with 0 as PFN.
- * That is b/c the pagetable at that point are just being populated
- * with empty values and we can save some cycles by not calling
- * the 'memblock' code.*/
- if (pfn)
- mark_rw_past_pgt();
/*
* If the new pfn is within the range of the newly allocated
* kernel pagetable, and it isn't being mapped into an
static __init void xen_post_allocator_init(void)
{
- mark_rw_past_pgt();
-
#ifdef CONFIG_XEN_DEBUG
pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
#endif
void __init xen_init_mmu_ops(void)
{
+ x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
pv_mmu_ops = xen_mmu_ops;
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- u8 status;
-
- status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
- /*
- * At end of section 10.1 of AHCI spec (rev 1.3), it states
- * Software shall not set PxCMD.ST to 1 until it is determined
- * that a functoinal device is present on the port as determined by
- * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
- *
- * Even though most AHCI host controllers work without this check,
- * specific controller will fail under this condition
- */
- if (status & (ATA_BUSY | ATA_DRQ))
- return;
- else {
- ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
-
- if ((tmp & 0xf) != 0x3)
- return;
- }
/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
-unsigned int i915_semaphores = 1;
+unsigned int i915_semaphores = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);
unsigned int i915_enable_rc6 = 0;
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
nvbe->nr_pages = 0;
while (num_pages--) {
- if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+ /* this code path isn't called and is incorrect anyway */
+ if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
nvbe->pages[nvbe->nr_pages] =
dma_addrs[nvbe->nr_pages];
nvbe->ttm_alloced[nvbe->nr_pages] = true;
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
- cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
+ cgts_tcc_disable = 0xff000000;
gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
- smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
- WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
- POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
- SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
- WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
- SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
- SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
WREG32(VGT_NUM_INSTANCES, 1);
WREG32(CP_PERFMON_CNTL, 0);
- WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
FETCH_FIFO_HIWATER(0x4) |
DONE_FIFO_HIWATER(0xe0) |
ALU_UPDATE_FIFO_HIWATER(0x8)));
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
- * is requested. */
- if (dma_addr[i] != DMA_ERROR_CODE) {
+ /* we reverted the patch using dma_addr in TTM for now, but this
+ * code stops building on alpha, so just comment it out for now */
+ if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
rdev->gart.ttm_alloced[p] = true;
rdev->gart.pages_addr[p] = dma_addr[i];
} else {
jiffies, expires);
timer->expires = jiffies + expires;
- timer->data = (unsigned long)&alg_data;
+ timer->data = (unsigned long)alg_data;
add_timer(timer);
}
u8 command;
u8 ref_off;
u16 scratch;
- __be16 sample;
struct spi_message msg;
struct spi_transfer xfer[6];
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ __be16 sample ____cacheline_aligned;
};
struct ads7845_ser_req {
u8 command[3];
- u8 pwrdown[3];
- u8 sample[3];
struct spi_message msg;
struct spi_transfer xfer[2];
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 sample[3] ____cacheline_aligned;
};
static int ads7846_read12_ser(struct device *dev, unsigned command)
int iter, i;
unsigned long flags;
- data->chip->irq_ack(irq_data);
+ data->chip->irq_ack(data);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
gpio_request(pdata->ehci_data->reset_gpio_port[0],
"USB1 PHY reset");
gpio_direction_output
- (pdata->ehci_data->reset_gpio_port[0], 1);
+ (pdata->ehci_data->reset_gpio_port[0], 0);
}
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
gpio_request(pdata->ehci_data->reset_gpio_port[1],
"USB2 PHY reset");
gpio_direction_output
- (pdata->ehci_data->reset_gpio_port[1], 1);
+ (pdata->ehci_data->reset_gpio_port[1], 0);
}
/* Hold the PHY in RESET for enough time till DIR is high */
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_set_value
- (pdata->ehci_data->reset_gpio_port[0], 0);
+ (pdata->ehci_data->reset_gpio_port[0], 1);
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_set_value
- (pdata->ehci_data->reset_gpio_port[1], 0);
+ (pdata->ehci_data->reset_gpio_port[1], 1);
}
end_count:
if (err)
goto out;
}
- if (tscript->flags & TWL4030_SLEEP_SCRIPT)
+ if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
if (order)
pr_warning("TWL4030: Bad order of scripts (sleep "\
"script before wakeup) Leads to boot"\
"failure on some boards\n");
err = twl4030_config_sleep_sequence(address);
+ }
out:
return err;
}
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_FTMAC100) += ftmac100.o
obj-$(CONFIG_LP486E) += lp486e.o
obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_DECLANCE) += declance.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_HYDRA) += hydra.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACSONIC) += macsonic.o
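The three entries above drop 8390.o because ne-h8300, hydra and zorro8390 each build their own 8390 core by including lib8390.c; the matching driver hunks below therefore switch them to the __ei_* entry points and ____alloc_ei_netdev() from that embedded copy instead of the ei_* symbols that only the shared 8390.o provides.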
* Read the ethernet address string from the on board rom.
* This is an ascii string...
*/
-static int __init etherh_addr(char *addr, struct expansion_card *ec)
+static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
{
struct in_chunk_dir cd;
char *s;
static u32 etherh_regoffsets[16];
static u32 etherm_regoffsets[16];
-static int __init
+static int __devinit
etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
{
const struct etherh_data *data = id->data;
typedef struct mac_addr {
u8 mac_addr_value[ETH_ALEN];
-} mac_addr_t;
+} __packed mac_addr_t;
enum {
BOND_AD_STABLE = 0,
u8 tlv_type_terminator; // = terminator
u8 terminator_length; // = 0
u8 reserved_50[50]; // = 0
-} lacpdu_t;
+} __packed lacpdu_t;
typedef struct lacpdu_header {
struct ethhdr hdr;
struct lacpdu lacpdu;
-} lacpdu_header_t;
+} __packed lacpdu_header_t;
// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
typedef struct bond_marker {
u8 tlv_type_terminator; // = 0x00
u8 terminator_length; // = 0x00
u8 reserved_90[90]; // = 0
-} bond_marker_t;
+} __packed bond_marker_t;
typedef struct bond_marker_header {
struct ethhdr hdr;
struct bond_marker marker;
-} bond_marker_header_t;
+} __packed bond_marker_header_t;
#pragma pack()
netif_start_queue(dev);
}
- init_waitqueue_head(&port->swqe_avail_wq);
- init_waitqueue_head(&port->restart_wq);
-
mutex_unlock(&port->port_lock);
return ret;
INIT_WORK(&port->reset_task, ehea_reset_port);
+ init_waitqueue_head(&port->swqe_avail_wq);
+ init_waitqueue_head(&port->restart_wq);
+
ret = register_netdev(dev);
if (ret) {
pr_err("register_netdev failed. ret=%d\n", ret);
.ndo_open = hydra_open,
.ndo_stop = hydra_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
+ .ndo_poll_controller = __ei_poll,
#endif
};
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
};
- dev = alloc_ei_netdev();
+ dev = ____alloc_ei_netdev(0);
if (!dev)
return -ENOMEM;
#ifndef MODULE
struct net_device * __init ne_probe(int unit)
{
- struct net_device *dev = alloc_ei_netdev();
+ struct net_device *dev = ____alloc_ei_netdev(0);
int err;
if (!dev)
.ndo_open = ne_open,
.ndo_stop = ne_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
+ .ndo_poll_controller = __ei_poll,
#endif
};
int err;
for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
- struct net_device *dev = alloc_ei_netdev();
+ struct net_device *dev = ____alloc_ei_netdev(0);
if (!dev)
break;
if (io[this_dev]) {
return &nic_data->mcdi;
}
+static inline void
+efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
+}
+
+static inline void
+efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
+}
+
void efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
const u8 *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned pdu = MCDI_PDU(efx);
+ unsigned doorbell = MCDI_DOORBELL(efx);
unsigned int i;
efx_dword_t hdr;
u32 xflags, seqno;
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
- efx_writed(efx, &hdr, pdu);
+ efx_mcdi_writed(efx, &hdr, pdu);
- for (i = 0; i < inlen; i += 4) {
- _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
- /* use wmb() within loop to inhibit write combining */
- wmb();
- }
+ for (i = 0; i < inlen; i += 4)
+ efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
+ pdu + 4 + i);
/* ring the doorbell with a distinctive value */
- _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
- wmb();
+ EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
+ efx_mcdi_writed(efx, &hdr, doorbell);
}
static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int pdu = MCDI_PDU(efx);
int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(outlen & 3 || outlen >= 0x100);
for (i = 0; i < outlen; i += 4)
- *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+ efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
}
static int efx_mcdi_poll(struct efx_nic *efx)
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int time, finish;
unsigned int respseq, respcmd, error;
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int pdu = MCDI_PDU(efx);
unsigned int rc, spins;
efx_dword_t reg;
time = get_seconds();
- rmb();
- efx_readd(efx, &reg, pdu);
+ efx_mcdi_readd(efx, &reg, pdu);
/* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before
respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
- efx_readd(efx, &reg, pdu + 4);
+ efx_mcdi_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+ unsigned int addr = MCDI_REBOOT_FLAG(efx);
efx_dword_t reg;
uint32_t value;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false;
- efx_readd(efx, &reg, addr);
+ efx_mcdi_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
if (value == 0)
return 0;
EFX_ZERO_DWORD(reg);
- efx_writed(efx, &reg, addr);
+ efx_mcdi_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
/**
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
+ * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
+ void __iomem *mcdi_smem;
int wol_filter_id;
};
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
+ /* Initialise MCDI */
+ nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
+ FR_CZ_MC_TREG_SMEM,
+ FR_CZ_MC_TREG_SMEM_STEP *
+ FR_CZ_MC_TREG_SMEM_ROWS);
+ if (!nic_data->mcdi_smem) {
+ netif_err(efx, probe, efx->net_dev,
+ "could not map MCDI at %llx+%x\n",
+ (unsigned long long)efx->membase_phys +
+ FR_CZ_MC_TREG_SMEM,
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
+ rc = -ENOMEM;
+ goto fail1;
+ }
efx_mcdi_init(efx);
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- goto fail1;
+ goto fail2;
/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
+ iounmap(nic_data->mcdi_smem);
fail1:
kfree(efx->nic_data);
return rc;
static void siena_remove_nic(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+
efx_nic_free_buffer(efx, &efx->irq_status);
siena_reset_hw(efx, RESET_TYPE_ALL);
efx_mcdi_drv_attach(efx, false, NULL);
/* Tear down the private nic state */
- kfree(efx->nic_data);
+ iounmap(nic_data->mcdi_smem);
+ kfree(nic_data);
efx->nic_data = NULL;
}
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = (FR_CZ_MC_TREG_SMEM +
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
+ .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
{
struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
int timeout = 200; /* ms */
int i, j;
cancel_delayed_work_sync(&sc->tx_complete_work);
+ if (sc->sc_flags & SC_OP_INVALID) {
+ ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ mutex_unlock(&sc->mutex);
+ return;
+ }
+
if (drop)
timeout = 1;
goto set_ch_out;
}
+ if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !iwl_legacy_is_channel_ibss(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
spin_lock_irqsave(&priv->lock, flags);
for_each_context(priv, ctx) {
return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
}
+static inline int
+iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
+{
+ return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
static inline void
__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
{
cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
lbs_deb_host(
"EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
- list_del(&cmdnode->list);
spin_lock_irqsave(&priv->driver_lock, flags);
+ list_del(&cmdnode->list);
lbs_complete_command(priv, cmdnode, 0);
spin_unlock_irqrestore(&priv->driver_lock, flags);
(priv->psstate == PS_STATE_PRE_SLEEP)) {
lbs_deb_host(
"EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n");
- list_del(&cmdnode->list);
spin_lock_irqsave(&priv->driver_lock, flags);
+ list_del(&cmdnode->list);
lbs_complete_command(priv, cmdnode, 0);
spin_unlock_irqrestore(&priv->driver_lock, flags);
priv->needtowakeup = 1;
"EXEC_NEXT_CMD: sending EXIT_PS\n");
}
}
+ spin_lock_irqsave(&priv->driver_lock, flags);
list_del(&cmdnode->list);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
le16_to_cpu(cmd->command));
lbs_submit_command(priv, cmdnode);
board = z->resource.start;
ioaddr = board+cards[i].offset;
- dev = alloc_ei_netdev();
+ dev = ____alloc_ei_netdev(0);
if (!dev)
return -ENOMEM;
if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_open = zorro8390_open,
.ndo_stop = zorro8390_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_multicast_list = ei_set_multicast_list,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
+ .ndo_poll_controller = __ei_poll,
#endif
};
static void __iomem *s3c_rtc_base;
static int s3c_rtc_alarmno = NO_IRQ;
static int s3c_rtc_tickno = NO_IRQ;
+static bool wake_en;
static enum s3c_cpu_type s3c_rtc_cpu_type;
static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
}
s3c_rtc_enable(pdev, 0);
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(s3c_rtc_alarmno);
+ if (device_may_wakeup(&pdev->dev) && !wake_en) {
+ if (enable_irq_wake(s3c_rtc_alarmno) == 0)
+ wake_en = true;
+ else
+ dev_err(&pdev->dev, "enable_irq_wake failed\n");
+ }
return 0;
}
writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
}
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev) && wake_en) {
disable_irq_wake(s3c_rtc_alarmno);
+ wake_en = false;
+ }
return 0;
}
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
+ int rc;
if (list_empty(&device->ccw_queue))
return 0;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
- return device->discipline->term_IO(cqr);
+ rc = device->discipline->term_IO(cqr);
+ if (!rc)
+ /*
+ * CQR terminated because a more important request is pending.
+ * Undo the decrement of the retry counter because this is
+ * not an error case.
+ */
+ cqr->retries++;
+ return rc;
}
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
return;
new_incr->rn = rn;
new_incr->standby = standby;
+ if (!standby)
+ new_incr->usecount = 1;
last_rn = 0;
prev = &sclp_mem_list;
list_for_each_entry(incr, &sclp_mem_list, list) {
* have. Allow 1% either way on the nominal for TVs.
*/
#define NR_MONTYPES 6
-static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = {
+static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = {
{ /* TV */
.hfmin = 15469,
.hfmax = 15781,
/*
* Everything after here is initialisation!!!
*/
-static struct fb_videomode modedb[] __initdata = {
+static struct fb_videomode modedb[] __devinitdata = {
{ /* 320x256 @ 50Hz */
NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2,
FB_SYNC_COMP_HIGH_ACT,
}
};
-static struct fb_videomode __initdata
-acornfb_default_mode = {
+static struct fb_videomode acornfb_default_mode __devinitdata = {
.name = NULL,
.refresh = 60,
.xres = 640,
.vmode = FB_VMODE_NONINTERLACED
};
-static void __init acornfb_init_fbinfo(void)
+static void __devinit acornfb_init_fbinfo(void)
{
static int first = 1;
* size can optionally be followed by 'M' or 'K' for
* MB or KB respectively.
*/
-static void __init
-acornfb_parse_mon(char *opt)
+static void __devinit acornfb_parse_mon(char *opt)
{
char *p = opt;
current_par.montype = -1;
}
-static void __init
-acornfb_parse_montype(char *opt)
+static void __devinit acornfb_parse_montype(char *opt)
{
current_par.montype = -2;
}
}
-static void __init
-acornfb_parse_dram(char *opt)
+static void __devinit acornfb_parse_dram(char *opt)
{
unsigned int size;
static struct options {
char *name;
void (*parse)(char *opt);
-} opt_table[] __initdata = {
+} opt_table[] __devinitdata = {
{ "mon", acornfb_parse_mon },
{ "montype", acornfb_parse_montype },
{ "dram", acornfb_parse_dram },
{ NULL, NULL }
};
-int __init
-acornfb_setup(char *options)
+static int __devinit acornfb_setup(char *options)
{
struct options *optp;
char *opt;
* Detect type of monitor connected
* For now, we just assume SVGA
*/
-static int __init
-acornfb_detect_monitortype(void)
+static int __devinit acornfb_detect_monitortype(void)
{
return 4;
}
used |= CEPH_CAP_FILE_CACHE;
if (ci->i_wr_ref)
used |= CEPH_CAP_FILE_WR;
- if (ci->i_wrbuffer_ref)
+ if (ci->i_wb_ref || ci->i_wrbuffer_ref)
used |= CEPH_CAP_FILE_BUFFER;
return used;
}
if (got & CEPH_CAP_FILE_WR)
ci->i_wr_ref++;
if (got & CEPH_CAP_FILE_BUFFER) {
- if (ci->i_wrbuffer_ref == 0)
+ if (ci->i_wb_ref == 0)
ihold(&ci->vfs_inode);
- ci->i_wrbuffer_ref++;
- dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
- &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
+ ci->i_wb_ref++;
+ dout("__take_cap_refs %p wb %d -> %d (?)\n",
+ &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
}
}
if (--ci->i_rdcache_ref == 0)
last++;
if (had & CEPH_CAP_FILE_BUFFER) {
- if (--ci->i_wrbuffer_ref == 0) {
+ if (--ci->i_wb_ref == 0) {
last++;
put++;
}
- dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
- inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
+ dout("put_cap_refs %p wb %d -> %d (?)\n",
+ inode, ci->i_wb_ref+1, ci->i_wb_ref);
}
if (had & CEPH_CAP_FILE_WR)
if (--ci->i_wr_ref == 0) {
ci->i_rd_ref = 0;
ci->i_rdcache_ref = 0;
ci->i_wr_ref = 0;
+ ci->i_wb_ref = 0;
ci->i_wrbuffer_ref = 0;
ci->i_wrbuffer_ref_head = 0;
ci->i_shared_gen = 0;
{
struct ceph_mds_session *s = con->private;
+ dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
ceph_put_mds_session(s);
- dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
}
/*
up_write(&mdsc->snap_rwsem);
} else {
spin_lock(&mdsc->snap_empty_lock);
- list_add(&mdsc->snap_empty, &realm->empty_item);
+ list_add(&realm->empty_item, &mdsc->snap_empty);
spin_unlock(&mdsc->snap_empty_lock);
}
}
/* held references to caps */
int i_pin_ref;
- int i_rd_ref, i_rdcache_ref, i_wr_ref;
+ int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
int i_wrbuffer_ref, i_wrbuffer_ref_head;
u32 i_shared_gen; /* increment each time we get FILE_SHARED */
u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */
if (!inode)
return 0;
- if (nd->flags & LOOKUP_RCU)
+ if (nd && (nd->flags & LOOKUP_RCU))
return -ECHILD;
fc = get_fuse_conn(inode);
static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
{
- umode_t mode = inode->i_mode;
+ unsigned int mode = inode->i_mode;
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
case -EKEYEXPIRED:
rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ break;
default:
dprintk("%s DS error. Retry through MDS %d\n", __func__,
task->tk_status);
filelayout_check_layout(struct pnfs_layout_hdr *lo,
struct nfs4_filelayout_segment *fl,
struct nfs4_layoutget_res *lgr,
- struct nfs4_deviceid *id)
+ struct nfs4_deviceid *id,
+ gfp_t gfp_flags)
{
struct nfs4_file_layout_dsaddr *dsaddr;
int status = -EINVAL;
/* find and reference the deviceid */
dsaddr = nfs4_fl_find_get_deviceid(id);
if (dsaddr == NULL) {
- dsaddr = get_device_info(lo->plh_inode, id);
+ dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
if (dsaddr == NULL)
goto out;
}
filelayout_decode_layout(struct pnfs_layout_hdr *flo,
struct nfs4_filelayout_segment *fl,
struct nfs4_layoutget_res *lgr,
- struct nfs4_deviceid *id)
+ struct nfs4_deviceid *id,
+ gfp_t gfp_flags)
{
struct xdr_stream stream;
struct xdr_buf buf = {
dprintk("%s: set_layout_map Begin\n", __func__);
- scratch = alloc_page(GFP_KERNEL);
+ scratch = alloc_page(gfp_flags);
if (!scratch)
return -ENOMEM;
goto out_err;
fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
- GFP_KERNEL);
+ gfp_flags);
if (!fl->fh_array)
goto out_err;
for (i = 0; i < fl->num_fh; i++) {
/* Do we want to use a mempool here? */
- fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+ fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
if (!fl->fh_array[i])
goto out_err_free;
static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
- struct nfs4_layoutget_res *lgr)
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
{
struct nfs4_filelayout_segment *fl;
int rc;
struct nfs4_deviceid id;
dprintk("--> %s\n", __func__);
- fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ fl = kzalloc(sizeof(*fl), gfp_flags);
if (!fl)
return NULL;
- rc = filelayout_decode_layout(layoutid, fl, lgr, &id);
- if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) {
+ rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
+ if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
_filelayout_free_lseg(fl);
return NULL;
}
int size = (fl->stripe_type == STRIPE_SPARSE) ?
fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
- fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL);
+ fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags);
if (!fl->commit_buckets) {
filelayout_free_lseg(&fl->generic_hdr);
return NULL;
nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id);
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
#endif /* FS_NFS_NFS4FILELAYOUT_H */
}
static struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
+nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags)
{
struct nfs4_pnfs_ds *tmp_ds, *ds;
- ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL);
+ ds = kzalloc(sizeof(*tmp_ds), gfp_flags);
if (!ds)
goto out;
* Currently only support ipv4, and one multi-path address.
*/
static struct nfs4_pnfs_ds *
-decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
+decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags)
{
struct nfs4_pnfs_ds *ds = NULL;
char *buf;
rlen);
goto out_err;
}
- buf = kmalloc(rlen + 1, GFP_KERNEL);
+ buf = kmalloc(rlen + 1, gfp_flags);
if (!buf) {
dprintk("%s: Not enough memory\n", __func__);
goto out_err;
sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
port = htons((tmp[0] << 8) | (tmp[1]));
- ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
+ ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags);
dprintk("%s: Decoded address and port %s\n", __func__, buf);
out_free:
kfree(buf);
/* Decode opaque device data and return the result */
static struct nfs4_file_layout_dsaddr*
-decode_device(struct inode *ino, struct pnfs_device *pdev)
+decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
{
int i;
u32 cnt, num;
struct page *scratch;
/* set up xdr stream */
- scratch = alloc_page(GFP_KERNEL);
+ scratch = alloc_page(gfp_flags);
if (!scratch)
goto out_err;
}
/* read stripe indices */
- stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL);
+ stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
if (!stripe_indices)
goto out_err_free_scratch;
dsaddr = kzalloc(sizeof(*dsaddr) +
(sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
- GFP_KERNEL);
+ gfp_flags);
if (!dsaddr)
goto out_err_free_stripe_indices;
for (j = 0; j < mp_count; j++) {
if (j == 0) {
dsaddr->ds_list[i] = decode_and_add_ds(&stream,
- ino);
+ ino, gfp_flags);
if (dsaddr->ds_list[i] == NULL)
goto out_err_free_deviceid;
} else {
* available devices.
*/
static struct nfs4_file_layout_dsaddr *
-decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
+decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
{
struct nfs4_file_layout_dsaddr *d, *new;
long hash;
- new = decode_device(inode, dev);
+ new = decode_device(inode, dev, gfp_flags);
if (!new) {
printk(KERN_WARNING "%s: Could not decode or add device\n",
__func__);
* of available devices, and return it.
*/
struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
{
struct pnfs_device *pdev = NULL;
u32 max_resp_sz;
dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
__func__, inode, max_resp_sz, max_pages);
- pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL);
+ pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
if (pdev == NULL)
return NULL;
- pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+ pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
if (pages == NULL) {
kfree(pdev);
return NULL;
}
for (i = 0; i < max_pages; i++) {
- pages[i] = alloc_page(GFP_KERNEL);
+ pages[i] = alloc_page(gfp_flags);
if (!pages[i])
goto out_free;
}
* Found new device, need to decode it and then add it to the
* list of known devices for this mountpoint.
*/
- dsaddr = decode_and_add_device(inode, pdev);
+ dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
out_free:
for (i = 0; i < max_pages; i++)
__free_page(pages[i]);
ret = nfs4_delay(server->client, &exception->timeout);
if (ret != 0)
break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
exception->retry = 1;
break;
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
return -EAGAIN;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
case -NFS4ERR_OLD_STATEID:
task->tk_status = 0;
return -EAGAIN;
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
+ /* fall through */
+ case -NFS4ERR_RETRY_UNCACHED_REP:
nfs_restart_rpc(task, data->clp);
return;
}
break;
case -NFS4ERR_DELAY:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ /* fall through */
+ case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
default:
nfs4_schedule_lease_recovery(clp);
plh_layouts);
dprintk("%s freeing layout for inode %lu\n", __func__,
lo->plh_inode->i_ino);
+ list_del_init(&lo->plh_layouts);
pnfs_destroy_layout(NFS_I(lo->plh_inode));
}
}
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_open_context *ctx,
- u32 iomode)
+ u32 iomode,
+ gfp_t gfp_flags)
{
struct inode *ino = lo->plh_inode;
struct nfs_server *server = NFS_SERVER(ino);
dprintk("--> %s\n", __func__);
BUG_ON(ctx == NULL);
- lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
+ lgp = kzalloc(sizeof(*lgp), gfp_flags);
if (lgp == NULL)
return NULL;
max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
max_pages = max_resp_sz >> PAGE_SHIFT;
- pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+ pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
if (!pages)
goto out_err_free;
for (i = 0; i < max_pages; i++) {
- pages[i] = alloc_page(GFP_KERNEL);
+ pages[i] = alloc_page(gfp_flags);
if (!pages[i])
goto out_err_free;
}
lgp->args.layout.pages = pages;
lgp->args.layout.pglen = max_pages * PAGE_SIZE;
lgp->lsegpp = &lseg;
+ lgp->gfp_flags = gfp_flags;
/* Synchronously retrieve layout information from server and
* store in lseg.
}
static struct pnfs_layout_hdr *
-alloc_init_layout_hdr(struct inode *ino)
+alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
struct pnfs_layout_hdr *lo;
- lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
+ lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
if (!lo)
return NULL;
atomic_set(&lo->plh_refcount, 1);
}
static struct pnfs_layout_hdr *
-pnfs_find_alloc_layout(struct inode *ino)
+pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct pnfs_layout_hdr *new = NULL;
return nfsi->layout;
}
spin_unlock(&ino->i_lock);
- new = alloc_init_layout_hdr(ino);
+ new = alloc_init_layout_hdr(ino, gfp_flags);
spin_lock(&ino->i_lock);
if (likely(nfsi->layout == NULL)) /* Won the race? */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
- enum pnfs_iomode iomode)
+ enum pnfs_iomode iomode,
+ gfp_t gfp_flags)
{
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
return NULL;
spin_lock(&ino->i_lock);
- lo = pnfs_find_alloc_layout(ino);
+ lo = pnfs_find_alloc_layout(ino, gfp_flags);
if (lo == NULL) {
dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
goto out_unlock;
spin_unlock(&clp->cl_lock);
}
- lseg = send_layoutget(lo, ctx, iomode);
+ lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
if (!lseg && first) {
spin_lock(&clp->cl_lock);
list_del_init(&lo->plh_layouts);
goto out;
}
/* Inject layout blob into I/O device driver */
- lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
+ lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
if (!lseg || IS_ERR(lseg)) {
if (!lseg)
status = -ENOMEM;
/* This is first coelesce call for a series of nfs_pages */
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
prev->wb_context,
- IOMODE_READ);
+ IOMODE_READ,
+ GFP_KERNEL);
}
return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}
/* This is first coelesce call for a series of nfs_pages */
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
prev->wb_context,
- IOMODE_RW);
+ IOMODE_RW,
+ GFP_NOFS);
}
return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}
const u32 id;
const char *name;
struct module *owner;
- struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr);
+ struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
void (*free_lseg) (struct pnfs_layout_segment *lseg);
/* test for nfs page cache coalescing */
void put_lseg(struct pnfs_layout_segment *lseg);
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
- enum pnfs_iomode access_type);
+ enum pnfs_iomode access_type, gfp_t gfp_flags);
void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
void unset_pnfs_layoutdriver(struct nfs_server *);
enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
static inline struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
- enum pnfs_iomode access_type)
+ enum pnfs_iomode access_type, gfp_t gfp_flags)
{
return NULL;
}
atomic_set(&req->wb_complete, requests);
BUG_ON(desc->pg_lseg != NULL);
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
ClearPageError(page);
offset = 0;
nbytes = desc->pg_count;
}
req = nfs_list_entry(data->pages.next);
if ((!lseg) && list_is_singular(&data->pages))
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
0, lseg);
atomic_set(&req->wb_complete, requests);
BUG_ON(desc->pg_lseg);
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
ClearPageError(page);
offset = 0;
nbytes = desc->pg_count;
}
req = nfs_list_entry(data->pages.next);
if ((!lseg) && list_is_singular(&data->pages))
- lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_node_nopanic(pgdat, x) \
+ __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool task_ns_capable(struct task_struct *t, int cap);
-
-/**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-static inline bool nsown_capable(int cap)
-{
- return ns_capable(current_user_ns(), cap);
-}
+extern bool nsown_capable(int cap);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
void *security; /* subjective LSM security */
#endif
struct user_struct *user; /* real user ID subscription */
+ struct user_namespace *user_ns; /* cached user->user_ns */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
struct rcu_head rcu; /* RCU deletion hook */
};
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
-#define _current_user_ns() (current_cred_xxx(user)->user_ns)
#define current_security() (current_cred_xxx(security))
-extern struct user_namespace *current_user_ns(void);
+#ifdef CONFIG_USER_NS
+#define current_user_ns() (current_cred_xxx(user_ns))
+#else
+extern struct user_namespace init_user_ns;
+#define current_user_ns() (&init_user_ns)
+#endif
+
#define current_uid_gid(_uid, _gid) \
do { \
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
+/* This is different from alloc_pages_exact_node !!! */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
struct pnfs_layout_segment **lsegpp;
+ gfp_t gfp_flags;
};
struct nfs4_getdeviceinfo_args {
return outer;
}
-#define INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define INET_ECN_dontxmit(sk) \
- do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+ inet_sk(sk)->tos |= INET_ECN_ECT_0;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+ inet_sk(sk)->tos &= ~INET_ECN_MASK;
+ if (inet6_sk(sk) != NULL)
+ inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
#define IP6_ECN_flow_init(label) do { \
(label) &= ~htonl(INET_ECN_MASK << 20); \
u8 ssap;
u8 ctrl_1;
u8 ctrl_2;
-};
+} __packed;
static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
{
u8 dsap;
u8 ssap;
u8 ctrl_1;
-};
+} __packed;
static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
{
u8 fmt_id; /* always 0x81 for LLC */
u8 type; /* different if NULL/non-NULL LSAP */
u8 rw; /* sender receive window */
-};
+} __packed;
/**
* llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
u8 curr_ssv; /* current send state variable val */
u8 curr_rsv; /* current receive state variable */
u8 ind_bits; /* indicator bits set with macro */
-};
+} __packed;
extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
*/
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
+ {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
{(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
{(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
{(unsigned long)GFP_USER, "GFP_USER"}, \
{(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
{(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
- {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
+ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"}, \
+ {(unsigned long)__GFP_NOTRACK, "GFP_NOTRACK"}, \
+ {(unsigned long)__GFP_NO_KSWAPD, "GFP_NO_KSWAPD"}, \
+ {(unsigned long)__GFP_OTHER_NODE, "GFP_OTHER_NODE"} \
) : "GFP_NOWAIT"
return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
}
EXPORT_SYMBOL(task_ns_capable);
+
+/**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
+ *
+ * Return true if the current task has the given superior capability
+ * targeted at its own user namespace.
+ */
+bool nsown_capable(int cap)
+{
+ return ns_capable(current_user_ns(), cap);
+}
.cap_effective = CAP_INIT_EFF_SET,
.cap_bset = CAP_INIT_BSET,
.user = INIT_USER,
+ .user_ns = &init_user_ns,
.group_info = &init_groups,
#ifdef CONFIG_KEYS
.tgcred = &init_tgcred,
goto error_put;
}
+ /* cache user_ns in cred. Doesn't need a refcount because it will
+ * stay pinned by cred->user
+ */
+ new->user_ns = new->user->user_ns;
+
#ifdef CONFIG_KEYS
/* new threads get their own thread keyrings if their parent already
* had one */
}
EXPORT_SYMBOL(set_create_files_as);
-struct user_namespace *current_user_ns(void)
-{
- return _current_user_ns();
-}
-EXPORT_SYMBOL(current_user_ns);
-
#ifdef CONFIG_DEBUG_CREDENTIALS
bool creds_are_invalid(const struct cred *cred)
goto Close;
}
suspend_console();
- pm_restrict_gfp_mask();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
- pm_restore_gfp_mask();
resume_console();
Close:
if (suspend_ops->end)
goto Finish;
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
+ pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
+ pm_restore_gfp_mask();
Finish:
pr_debug("PM: Finishing wakeup.\n");
free_basic_memory_bitmaps();
data = filp->private_data;
free_all_swap_pages(data->swap);
- if (data->frozen)
+ if (data->frozen) {
+ pm_restore_gfp_mask();
thaw_processes();
+ }
pm_notifier_call_chain(data->mode == O_RDONLY ?
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
* PM_HIBERNATION_PREPARE
*/
error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+ data->ready = 0;
break;
case SNAPSHOT_PLATFORM_SUPPORT:
return string(buf, end, uuid, spec);
}
-int kptr_restrict = 1;
+int kptr_restrict __read_mostly;
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
EXPORT_SYMBOL(free_pages);
+static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+{
+ if (addr) {
+ unsigned long alloc_end = addr + (PAGE_SIZE << order);
+ unsigned long used = addr + PAGE_ALIGN(size);
+
+ split_page(virt_to_page((void *)addr), order);
+ while (used < alloc_end) {
+ free_page(used);
+ used += PAGE_SIZE;
+ }
+ }
+ return (void *)addr;
+}
+
/**
* alloc_pages_exact - allocate an exact number physically-contiguous pages.
* @size: the number of bytes to allocate
unsigned long addr;
addr = __get_free_pages(gfp_mask, order);
- if (addr) {
- unsigned long alloc_end = addr + (PAGE_SIZE << order);
- unsigned long used = addr + PAGE_ALIGN(size);
-
- split_page(virt_to_page((void *)addr), order);
- while (used < alloc_end) {
- free_page(used);
- used += PAGE_SIZE;
- }
- }
-
- return (void *)addr;
+ return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);
+/**
+ * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
+ * pages on a node.
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * Like alloc_pages_exact(), but try to allocate on node nid first before falling
+ * back.
+ * Note this is not alloc_pages_exact_node() which allocates on a specific node,
+ * but is not exact.
+ */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+{
+ unsigned order = get_order(size);
+ struct page *p = alloc_pages_node(nid, gfp_mask, order);
+ if (!p)
+ return NULL;
+ return make_alloc_exact((unsigned long)page_address(p), order, size);
+}
+EXPORT_SYMBOL(alloc_pages_exact_nid);
+
/**
* free_pages_exact - release memory allocated via alloc_pages_exact()
* @virt: the value returned by alloc_pages_exact.
if (!slab_is_available()) {
zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node(pgdat, alloc_size);
+ alloc_bootmem_node_nopanic(pgdat, alloc_size);
} else {
/*
* This case means that a zone whose size was 0 gets new memory
unsigned long usemapsize = usemap_size(zonesize);
zone->pageblock_flags = NULL;
if (usemapsize)
- zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
+ zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+ usemapsize);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat,
size = (end - start) * sizeof(struct page);
map = alloc_remap(pgdat->node_id, size);
if (!map)
- map = alloc_bootmem_node(pgdat, size);
+ map = alloc_bootmem_node_nopanic(pgdat, size);
pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
{
void *addr = NULL;
- addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN);
+ addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
if (addr)
return addr;
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
- struct inode *inode;
+ struct address_space *mapping;
unsigned long idx;
unsigned long size;
unsigned long limit;
if (size > SHMEM_NR_DIRECT)
size = SHMEM_NR_DIRECT;
offset = shmem_find_swp(entry, ptr, ptr+size);
- if (offset >= 0)
+ if (offset >= 0) {
+ shmem_swp_balance_unmap();
goto found;
+ }
if (!info->i_indirect)
goto lost2;
if (size > ENTRIES_PER_PAGE)
size = ENTRIES_PER_PAGE;
offset = shmem_find_swp(entry, ptr, ptr+size);
- shmem_swp_unmap(ptr);
if (offset >= 0) {
shmem_dir_unmap(dir);
goto found;
}
+ shmem_swp_unmap(ptr);
}
}
lost1:
return 0;
found:
idx += offset;
- inode = igrab(&info->vfs_inode);
- spin_unlock(&info->lock);
+ ptr += offset;
/*
* Move _head_ to start search for next from here.
*/
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
- mutex_unlock(&shmem_swaplist_mutex);
- error = 1;
- if (!inode)
- goto out;
/*
- * Charge page using GFP_KERNEL while we can wait.
- * Charged back to the user(not to caller) when swap account is used.
- * add_to_page_cache() will be called with GFP_NOWAIT.
+ * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
+ * but also to hold up shmem_evict_inode(): so inode cannot be freed
+ * beneath us (pagelock doesn't help until the page is in pagecache).
*/
- error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
- if (error)
- goto out;
- error = radix_tree_preload(GFP_KERNEL);
- if (error) {
- mem_cgroup_uncharge_cache_page(page);
- goto out;
- }
- error = 1;
-
- spin_lock(&info->lock);
- ptr = shmem_swp_entry(info, idx, NULL);
- if (ptr && ptr->val == entry.val) {
- error = add_to_page_cache_locked(page, inode->i_mapping,
- idx, GFP_NOWAIT);
- /* does mem_cgroup_uncharge_cache_page on error */
- } else /* we must compensate for our precharge above */
- mem_cgroup_uncharge_cache_page(page);
+ mapping = info->vfs_inode.i_mapping;
+ error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
+ /* which does mem_cgroup_uncharge_cache_page on error */
if (error == -EEXIST) {
- struct page *filepage = find_get_page(inode->i_mapping, idx);
+ struct page *filepage = find_get_page(mapping, idx);
error = 1;
if (filepage) {
/*
swap_free(entry);
error = 1; /* not an error, but entry was found */
}
- if (ptr)
- shmem_swp_unmap(ptr);
+ shmem_swp_unmap(ptr);
spin_unlock(&info->lock);
- radix_tree_preload_end();
-out:
- unlock_page(page);
- page_cache_release(page);
- iput(inode); /* allows for NULL */
return error;
}
struct list_head *p, *next;
struct shmem_inode_info *info;
int found = 0;
+ int error;
+
+ /*
+ * Charge page using GFP_KERNEL while we can wait, before taking
+ * the shmem_swaplist_mutex which might hold up shmem_writepage().
+ * Charged back to the user (not to caller) when swap account is used.
+ * add_to_page_cache() will be called with GFP_NOWAIT.
+ */
+ error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+ if (error)
+ goto out;
+ /*
+ * Try to preload while we can wait, to not make a habit of
+ * draining atomic reserves; but don't latch on to this cpu,
+ * it's okay if sometimes we get rescheduled after this.
+ */
+ error = radix_tree_preload(GFP_KERNEL);
+ if (error)
+ goto uncharge;
+ radix_tree_preload_end();
mutex_lock(&shmem_swaplist_mutex);
list_for_each_safe(p, next, &shmem_swaplist) {
found = shmem_unuse_inode(info, entry, page);
cond_resched();
if (found)
- goto out;
+ break;
}
mutex_unlock(&shmem_swaplist_mutex);
- /*
- * Can some race bring us here? We've been holding page lock,
- * so I think not; but would rather try again later than BUG()
- */
+
+uncharge:
+ if (!found)
+ mem_cgroup_uncharge_cache_page(page);
+ if (found < 0)
+ error = found;
+out:
unlock_page(page);
page_cache_release(page);
-out:
- return (found < 0) ? found : 0;
+ return error;
}
/*
struct address_space *mapping;
unsigned long index;
struct inode *inode;
+ bool unlock_mutex = false;
BUG_ON(!PageLocked(page));
mapping = page->mapping;
else
swap.val = 0;
+ /*
+ * Add inode to shmem_unuse()'s list of swapped-out inodes,
+ * if it's not already there. Do it now because we cannot take
+ * mutex while holding spinlock, and must do so before the page
+ * is moved to swap cache, when its pagelock no longer protects
+ * the inode from eviction. But don't unlock the mutex until
+ * we've taken the spinlock, because shmem_unuse_inode() will
+ * prune a !swapped inode from the swaplist under both locks.
+ */
+ if (swap.val && list_empty(&info->swaplist)) {
+ mutex_lock(&shmem_swaplist_mutex);
+ /* move instead of add in case we're racing */
+ list_move_tail(&info->swaplist, &shmem_swaplist);
+ unlock_mutex = true;
+ }
+
spin_lock(&info->lock);
+ if (unlock_mutex)
+ mutex_unlock(&shmem_swaplist_mutex);
+
if (index >= info->next_index) {
BUG_ON(!(info->flags & SHMEM_TRUNCATE));
goto unlock;
delete_from_page_cache(page);
shmem_swp_set(info, entry, swap.val);
shmem_swp_unmap(entry);
- if (list_empty(&info->swaplist))
- inode = igrab(inode);
- else
- inode = NULL;
spin_unlock(&info->lock);
swap_shmem_alloc(swap);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
- if (inode) {
- mutex_lock(&shmem_swaplist_mutex);
- /* move instead of add in case we're racing */
- list_move_tail(&info->swaplist, &shmem_swaplist);
- mutex_unlock(&shmem_swaplist_mutex);
- iput(inode);
- }
return 0;
}
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
sbinfo->max_blocks) >= 0 ||
- shmem_acct_block(info->flags)) {
- spin_unlock(&info->lock);
- error = -ENOSPC;
- goto failed;
- }
+ shmem_acct_block(info->flags))
+ goto nospace;
percpu_counter_inc(&sbinfo->used_blocks);
spin_lock(&inode->i_lock);
inode->i_blocks += BLOCKS_PER_PAGE;
spin_unlock(&inode->i_lock);
- } else if (shmem_acct_block(info->flags)) {
- spin_unlock(&info->lock);
- error = -ENOSPC;
- goto failed;
- }
+ } else if (shmem_acct_block(info->flags))
+ goto nospace;
if (!filepage) {
int ret;
error = 0;
goto out;
+nospace:
+ /*
+ * Perhaps the page was brought in from swap between find_lock_page
+ * and taking info->lock? We allow for that at add_to_page_cache_lru,
+ * but must also avoid reporting a spurious ENOSPC while working on a
+ * full tmpfs. (When filepage has been passed in to shmem_getpage, it
+ * is already in page cache, which prevents this race from occurring.)
+ */
+ if (!filepage) {
+ struct page *page = find_get_page(mapping, idx);
+ if (page) {
+ spin_unlock(&info->lock);
+ page_cache_release(page);
+ goto repeat;
+ }
+ }
+ spin_unlock(&info->lock);
+ error = -ENOSPC;
failed:
if (*pagep != filepage) {
unlock_page(filepage);
if (!PageLRU(page))
return;
+ if (PageUnevictable(page))
+ return;
+
/* Some processes are using the page */
if (page_mapped(page))
return;
}
strcpy(dirent->d_name, nameptr);
+ kfree(nameptr);
out:
return fake_pdu.offset;
case BT_CONNECTED:
case BT_CONFIG:
- if (sco_pi(sk)->conn) {
- sk->sk_state = BT_DISCONN;
- sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
- hci_conn_put(sco_pi(sk)->conn->hcon);
- sco_pi(sk)->conn = NULL;
- } else
- sco_chan_del(sk, ECONNRESET);
- break;
-
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
nf_bridge->mask |= BRNF_PKT_TYPE;
}
- if (br_parse_ip_options(skb))
+ if (pf == PF_INET && br_parse_ip_options(skb))
return NF_DROP;
/* The physdev module checks on this */
&local->dynamic_ps_disable_work);
}
+ /* Don't restart the timer if we're not disassociated */
+ if (!ifmgd->associated)
+ return TX_CONTINUE;
+
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
goto out;
nel = le32_to_cpu(buf[0]);
- printk(KERN_ERR "%s: nel=%d\n", __func__, nel);
-
last = p->filename_trans;
while (last && last->next)
last = last->next;
goto out;
name[len] = 0;
- printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name);
-
rc = next_entry(buf, fp, sizeof(u32) * 4);
if (rc)
goto out;
SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
-SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0),
+SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
.read = ssm2602_read_reg_cache,
.write = ssm2602_write,
.set_bias_level = ssm2602_set_bias_level,
- .reg_cache_size = sizeof(ssm2602_reg),
+ .reg_cache_size = ARRAY_SIZE(ssm2602_reg),
.reg_word_size = sizeof(u16),
.reg_cache_default = ssm2602_reg,
};
* low = 0x1a
* high = 0x1b
*/
-static int ssm2602_i2c_probe(struct i2c_client *i2c,
+static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct ssm2602_priv *ssm2602;
return ret;
}
-static int ssm2602_i2c_remove(struct i2c_client *client)
+static int __devexit ssm2602_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
kfree(i2c_get_clientdata(client));
.owner = THIS_MODULE,
},
.probe = ssm2602_i2c_probe,
- .remove = ssm2602_i2c_remove,
+ .remove = __devexit_p(ssm2602_i2c_remove),
.id_table = ssm2602_i2c_id,
};
#endif
.reg_cache_step = 1,
.read = uda134x_read_reg_cache,
.write = uda134x_write,
-#ifdef POWER_OFF_ON_STANDBY
.set_bias_level = uda134x_set_bias_level,
-#endif
};
static int __devinit uda134x_codec_probe(struct platform_device *pdev)
SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup),
SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT,
- WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv),
+ WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
SOC_ENUM("ADC Companding Mode", adc_companding),
SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0),
struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
uint32_t conf;
- if (!dai->active)
+ if (dai->active)
return;
conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
return 0;
}
+static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
static struct snd_pcm_ops sst_platform_ops = {
.open = sst_platform_open,
.close = sst_platform_close,
.trigger = sst_platform_pcm_trigger,
.pointer = sst_platform_pcm_pointer,
.hw_params = sst_platform_pcm_hw_params,
+ .hw_free = sst_platform_pcm_hw_free,
};
static void sst_pcm_free(struct snd_pcm *pcm)
if (!card->name || !card->dev)
return -EINVAL;
+ dev_set_drvdata(card->dev, card);
+
snd_soc_initialize_card_lists(card);
soc_init_card_debugfs(card);