F: drivers/mmc/host/tmio_mmc.*
TMPFS (SHMEM FILESYSTEM)
-M: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+M: Hugh Dickins <hughd@google.com>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/shmem_fs.h
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 34
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Sheep on Meth
# *DOCUMENTATION*
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
-#define atomic_read(v) ((v)->counter + 0)
-#define atomic64_read(v) ((v)->counter + 0)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#define atomic64_set(v,i) ((v)->counter = (i))
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_ARM_ERRATA_411920
extern void v6_icache_inval_all(void);
v6_icache_inval_all();
+#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
+ asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n"
+ :
+ : "r" (0));
#else
asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
:
#ifndef __ASMARM_SMP_TWD_H
#define __ASMARM_SMP_TWD_H
+#define TWD_TIMER_LOAD 0x00
+#define TWD_TIMER_COUNTER 0x04
+#define TWD_TIMER_CONTROL 0x08
+#define TWD_TIMER_INTSTAT 0x0C
+
+#define TWD_WDOG_LOAD 0x20
+#define TWD_WDOG_COUNTER 0x24
+#define TWD_WDOG_CONTROL 0x28
+#define TWD_WDOG_INTSTAT 0x2C
+#define TWD_WDOG_RESETSTAT 0x30
+#define TWD_WDOG_DISABLE 0x34
+
+#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
+#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
+#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
+#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
+
struct clock_event_device;
extern void __iomem *twd_base;
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)
+/* Inner Shareable BTB operation (ARMv7 MP extensions) */
+#define TLB_V7_IS_BTB (1 << 22)
+
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
#endif
#ifdef CONFIG_SMP
-#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#else
#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
dsb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
dsb();
isb();
}
+ if (tlb_flag(TLB_V7_IS_BTB)) {
+ /* flush the branch target cache */
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+ dsb();
+ isb();
+ }
}
/*
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
-#define TWD_TIMER_LOAD 0x00
-#define TWD_TIMER_COUNTER 0x04
-#define TWD_TIMER_CONTROL 0x08
-#define TWD_TIMER_INTSTAT 0x0C
-
-#define TWD_WDOG_LOAD 0x20
-#define TWD_WDOG_COUNTER 0x24
-#define TWD_WDOG_CONTROL 0x28
-#define TWD_WDOG_INTSTAT 0x2C
-#define TWD_WDOG_RESETSTAT 0x30
-#define TWD_WDOG_DISABLE 0x34
-
-#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
-#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
-#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
-#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-
/* set up by the platform code */
void __iomem *twd_base;
mov r0, #0
ldmfd sp!, {r1, pc}
ENDPROC(__clear_user)
+ENDPROC(__clear_user_std)
.pushsection .fixup,"ax"
.align 0
#include "copy_template.S"
ENDPROC(__copy_to_user)
+ENDPROC(__copy_to_user_std)
.pushsection .fixup,"ax"
.align 0
CLK("davinci-mcasp.0", NULL, &mcasp0_clk),
CLK("davinci-mcasp.1", NULL, &mcasp1_clk),
CLK("davinci-mcasp.2", NULL, &mcasp2_clk),
- CLK("musb_hdrc", NULL, &usb20_clk),
+ CLK(NULL, "usb20", &usb20_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, "aintc", &aintc_clk),
CLK(NULL, "secu_mgr", &secu_mgr_clk),
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
+#ifdef CONFIG_SMP
+ str r0, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c10, 1 @ clean D line
#else
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
+#ifdef CONFIG_SMP
+ ldr r2, [r0] @ read for ownership
+ str r2, [r0] @ write for ownership
+#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
- b v6_dma_clean_range
+ teq r2, #DMA_TO_DEVICE
+ beq v6_dma_clean_range
+ b v6_dma_flush_range
ENDPROC(v6_dma_map_area)
/*
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
- add r1, r1, r0
- teq r2, #DMA_TO_DEVICE
- bne v6_dma_inv_range
mov pc, lr
ENDPROC(v6_dma_unmap_area)
cmp r0, r1
blo 1b
mov r0, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
+#else
mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
+#endif
dsb
isb
mov pc, lr
}
EXPORT_SYMBOL(flush_dcache_page);
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ if (vma->vm_flags & VM_EXEC)
+ __cpuc_coherent_user_range(uaddr, uaddr + len);
+}
+
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
size_t size, unsigned int mtype)
{
}
EXPORT_SYMBOL(__arm_ioremap);
-void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
return __arm_ioremap(phys_addr, size, mtype);
}
cmp r0, r1
blo 1b
mov ip, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
cmp r0, r1
blo 1b
mov r2, #0
+#ifdef CONFIG_SMP
+ mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
+#else
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
+#endif
dsb
isb
mov pc, lr
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
/*
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/* These should be written in asm but we do it in C for now. */
#define smp_mb__after_atomic_inc() barrier()
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i))
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#include <asm/system.h>
#define ATOMIC_INIT(i) ((atomic_t) { (i) })
#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i))
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
# Makefile for Linux arch/m68k/amiga source directory
#
-obj-y := config.o amiints.o cia.o chipram.o amisound.o
+obj-y := config.o amiints.o cia.o chipram.o amisound.o platform.o
obj-$(CONFIG_AMIGA_PCMCIA) += pcmcia.o
--- /dev/null
+/*
+ * Copyright (C) 2007-2009 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/zorro.h>
+
+#include <asm/amigahw.h>
+
+
+#ifdef CONFIG_ZORRO
+
+static const struct resource zorro_resources[] __initconst = {
+ /* Zorro II regions (on Zorro II/III) */
+ {
+ .name = "Zorro II exp",
+ .start = 0x00e80000,
+ .end = 0x00efffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Zorro II mem",
+ .start = 0x00200000,
+ .end = 0x009fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ /* Zorro III regions (on Zorro III only) */
+ {
+ .name = "Zorro III exp",
+ .start = 0xff000000,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "Zorro III cfg",
+ .start = 0x40000000,
+ .end = 0x7fffffff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+
+static int __init amiga_init_bus(void)
+{
+ if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
+ return -ENODEV;
+
+ platform_device_register_simple("amiga-zorro", -1, zorro_resources,
+ AMIGAHW_PRESENT(ZORRO3) ? 4 : 2);
+ return 0;
+}
+
+subsys_initcall(amiga_init_bus);
+
+#endif /* CONFIG_ZORRO */
+
+
+static int __init amiga_init_devices(void)
+{
+ if (!MACH_IS_AMIGA)
+ return -ENODEV;
+
+ /* video hardware */
+ if (AMIGAHW_PRESENT(AMI_VIDEO))
+ platform_device_register_simple("amiga-video", -1, NULL, 0);
+
+
+ /* sound hardware */
+ if (AMIGAHW_PRESENT(AMI_AUDIO))
+ platform_device_register_simple("amiga-audio", -1, NULL, 0);
+
+
+ /* storage interfaces */
+ if (AMIGAHW_PRESENT(AMI_FLOPPY))
+ platform_device_register_simple("amiga-floppy", -1, NULL, 0);
+
+ return 0;
+}
+
+device_initcall(amiga_init_devices);
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
static unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-static char rtc_status;
+static atomic_t rtc_status = ATOMIC_INIT(1);
-static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
unsigned char msr;
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
+ * We enforce only one user at a time here with the open/close.
*/
-
static int rtc_open(struct inode *inode, struct file *file)
{
- lock_kernel();
- if(rtc_status) {
- unlock_kernel();
+ if (!atomic_dec_and_test(&rtc_status)) {
+ atomic_inc(&rtc_status);
return -EBUSY;
}
-
- rtc_status = 1;
- unlock_kernel();
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
- lock_kernel();
- rtc_status = 0;
- unlock_kernel();
+ atomic_inc(&rtc_status);
return 0;
}
*/
static const struct file_operations rtc_fops = {
- .ioctl = rtc_ioctl,
- .open = rtc_open,
- .release = rtc_release,
+ .unlocked_ioctl = rtc_ioctl,
+ .open = rtc_open,
+ .release = rtc_release,
};
static struct miscdevice rtc_dev = {
extern void hp300_sched_init(irq_handler_t vector);
-extern unsigned long hp300_gettimeoffset (void);
-
-
+extern unsigned long hp300_gettimeoffset(void);
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
static inline void atomic_add(int i, atomic_t *v)
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
static __inline__ void atomic_add(int i, atomic_t *v)
#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr))
+#define ext2_find_next_zero_bit(addr, size, offset) \
+ generic_find_next_zero_le_bit((unsigned long *)addr, size, offset)
+#define ext2_find_next_bit(addr, size, offset) \
+ generic_find_next_le_bit((unsigned long *)addr, size, offset)
static inline int ext2_test_bit(int nr, const void *vaddr)
{
return (p - addr) * 32 + res;
}
-static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
- unsigned offset)
+static inline unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- const unsigned long *addr = vaddr;
const unsigned long *p = addr + (offset >> 5);
int bit = offset & 31UL, res;
return (p - addr) * 32 + res;
}
-static inline int ext2_find_next_bit(const void *vaddr, unsigned size,
- unsigned offset)
+static inline unsigned long generic_find_next_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
{
- const unsigned long *addr = vaddr;
const unsigned long *p = addr + (offset >> 5);
int bit = offset & 31UL, res;
#ifndef _M68K_PARAM_H
#define _M68K_PARAM_H
-#ifdef __KERNEL__
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
#ifdef __uClinux__
#define EXEC_PAGESIZE 4096
#else
#define EXEC_PAGESIZE 8192
#endif
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#include <asm-generic/param.h>
#endif /* _M68K_PARAM_H */
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
#ifdef DEBUG
- printk("do_page_fault() !=0 \n");
+ printk("do_page_fault() !=0\n");
#endif
if (user_mode(&fp->ptregs)){
/* delay writebacks after signal delivery */
void __init config_mac(void)
{
if (!MACH_IS_MAC)
- printk(KERN_ERR "ERROR: no Mac, but config_mac() called!! \n");
+ printk(KERN_ERR "ERROR: no Mac, but config_mac() called!!\n");
mach_sched_init = mac_sched_init;
mach_init_IRQ = mac_init_IRQ;
*/
iop_preinit();
- printk(KERN_INFO "Detected Macintosh model: %d \n", model);
+ printk(KERN_INFO "Detected Macintosh model: %d\n", model);
/*
* Report booter data:
mac_bi_data.videoaddr, mac_bi_data.videorow,
mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF,
mac_bi_data.dimensions >> 16);
- printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx \n",
+ printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx\n",
mac_bi_data.videological, mac_orig_videoaddr,
mac_bi_data.sccbase);
- printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx \n",
+ printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx\n",
mac_bi_data.boottime, mac_bi_data.gmtbias);
- printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n",
+ printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx\n",
mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
iop_init();
* the fault.
*/
- survive:
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
#ifdef DEBUG
printk("handle_mm_fault returns %d\n",fault);
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_global_init(current)) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
-
- printk("VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_group_exit(SIGKILL);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return 0;
no_context:
current->thread.signo = SIGBUS;
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
static atomic_t rtc_ready = ATOMIC_INIT(1);
-static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
unsigned long flags;
}
/*
- * We enforce only one user at a time here with the open/close.
- * Also clear the previous interrupt data on an open, and clean
- * up things on a close.
+ * We enforce only one user at a time here with the open/close.
*/
-
static int rtc_open(struct inode *inode, struct file *file)
{
- lock_kernel();
if( !atomic_dec_and_test(&rtc_ready) )
{
atomic_inc( &rtc_ready );
- unlock_kernel();
return -EBUSY;
}
- unlock_kernel();
-
return 0;
}
*/
static const struct file_operations rtc_fops = {
- .ioctl = rtc_ioctl,
- .open = rtc_open,
- .release = rtc_release,
+ .unlocked_ioctl = rtc_ioctl,
+ .open = rtc_open,
+ .release = rtc_release,
};
static struct miscdevice rtc_dev=
{
halted = 1;
printk("\n\n*******************************************\n"
- "Called q40_reset : press the RESET button!! \n"
+ "Called q40_reset : press the RESET button!!\n"
"*******************************************\n");
Q40_LED_ON();
while (1)
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/*
* atomic_set - set atomic variable
* @v: pointer of type atomic64_t
*
*/
-#define atomic64_read(v) ((v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
/*
* atomic64_set - set atomic variable
#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
+/*
+ * Bits 18 - 20 of the FPU Status Register will be read as 0,
+ * and should be written as zero.
+ */
+#define FPU_CSR_RSVD 0x001c0000
+
/*
* X the exception cause indicator
* E the exception enable
#define FPU_CSR_UDF_S 0x00000008
#define FPU_CSR_INE_S 0x00000004
-/* rounding mode */
+/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
+#define FPU_CSR_RM 0x00000003
#define FPU_CSR_RN 0x0 /* nearest */
#define FPU_CSR_RZ 0x1 /* towards zero */
#define FPU_CSR_RU 0x2 /* towards +Infinity */
PTR sys_fchmodat
PTR sys_faccessat
PTR compat_sys_pselect6
- PTR sys_ppoll /* 6265 */
+ PTR compat_sys_ppoll /* 6265 */
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_CSR 31 /* $31 = csr */
+/* Determine rounding mode from the RM bits of the FCSR */
+#define modeindex(v) ((v) & FPU_CSR_RM)
+
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
- value &= (FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
- ctx->fcr31 &= ~(FPU_CSR_FLUSH | FPU_CSR_ALL_E | FPU_CSR_ALL_S | 0x03);
- /* convert to ieee library modes */
- ctx->fcr31 |= (value & ~0x3) | ieee_rm[value & 0x3];
+
+ /*
+ * Don't write reserved bits,
+ * and convert to ieee library modes
+ */
+ ctx->fcr31 = (value &
+ ~(FPU_CSR_RSVD | FPU_CSR_RM)) |
+ ieee_rm[modeindex(value)];
}
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
*/
/* Check whether the irq belongs to me */
- enabled = read_c0_perfcnt() & LOONGSON2_PERFCNT_INT_EN;
+ enabled = read_c0_perfctrl() & LOONGSON2_PERFCNT_INT_EN;
if (!enabled)
return IRQ_NONE;
enabled = reg.cnt1_enabled | reg.cnt2_enabled;
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
static __inline__ int atomic_read(const atomic_t *v)
{
- return v->counter;
+ return (*(volatile int *)&(v)->counter);
}
/* exported interface */
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return v->counter;
+ return (*(volatile long *)&(v)->counter);
}
#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#if defined(CONFIG_GUSA_RB)
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ((v)->counter)
-#define atomic64_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
*/
static inline int atomic_read(const atomic_t *v)
{
- return v->counter;
+ return (*(volatile int *)&(v)->counter);
}
/**
*/
static inline long atomic64_read(const atomic64_t *v)
{
- return v->counter;
+ return (*(volatile long *)&(v)->counter);
}
/**
extern int k8_scan_nodes(void);
#ifdef CONFIG_K8_NB
+extern int num_k8_northbridges;
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return (node < num_k8_northbridges) ? k8_northbridges[node] : NULL;
}
+
#else
+#define num_k8_northbridges 0
+
static inline struct pci_dev *node_to_k8_nb_misc(int node)
{
return NULL;
(boot_cpu_data.x86_mask < 0x1)))
return;
+ /* not in virtualized environments */
+ if (num_k8_northbridges == 0)
+ return;
+
this_leaf->can_disable = true;
this_leaf->l3_indices = amd_calc_l3_indices();
}
* check OSVW bit for CPUs that are not affected
* by erratum #400
*/
- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
- if (val >= 2) {
- rdmsrl(MSR_AMD64_OSVW_STATUS, val);
- if (!(val & BIT(1)))
- goto no_c1e_idle;
+ if (cpu_has(c, X86_FEATURE_OSVW)) {
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+ if (val >= 2) {
+ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+ if (!(val & BIT(1)))
+ goto no_c1e_idle;
+ }
}
return 1;
}
* node, it must now point to the fake node ID.
*/
for (j = 0; j < MAX_LOCAL_APIC; j++)
- if (apicid_to_node[j] == nid)
+ if (apicid_to_node[j] == nid &&
+ fake_apicid_to_node[j] == NUMA_NO_NODE)
fake_apicid_to_node[j] = i;
}
for (i = 0; i < num_nodes; i++)
u32 size;
int i;
+ /* Must have extended configuration space */
+ if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
+ return;
+
/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
offset = fixed_bar_cap(dev->bus, dev->devfn);
if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
* released.
*/
int platform_device_add_resources(struct platform_device *pdev,
- struct resource *res, unsigned int num)
+ const struct resource *res, unsigned int num)
{
struct resource *r;
*/
struct platform_device *platform_device_register_simple(const char *name,
int id,
- struct resource *res,
+ const struct resource *res,
unsigned int num)
{
struct platform_device *pdev;
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
return get_disk(unit[drive].gendisk);
}
-static int __init amiga_floppy_init(void)
+static int __init amiga_floppy_probe(struct platform_device *pdev)
{
int i, ret;
- if (!MACH_IS_AMIGA)
- return -ENODEV;
-
- if (!AMIGAHW_PRESENT(AMI_FLOPPY))
- return -ENODEV;
-
if (register_blkdev(FLOPPY_MAJOR,"fd"))
return -EBUSY;
- /*
- * We request DSKPTR, DSKLEN and DSKDATA only, because the other
- * floppy registers are too spreaded over the custom register space
- */
- ret = -EBUSY;
- if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
- printk("fd: cannot get floppy registers\n");
- goto out_blkdev;
- }
-
ret = -ENOMEM;
if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
NULL) {
printk("fd: cannot get chip mem buffer\n");
- goto out_memregion;
+ goto out_blkdev;
}
ret = -EBUSY;
free_irq(IRQ_AMIGA_DSKBLK, NULL);
out_irq:
amiga_chip_free(raw_buf);
-out_memregion:
- release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
out_blkdev:
unregister_blkdev(FLOPPY_MAJOR,"fd");
return ret;
}
-module_init(amiga_floppy_init);
-#ifdef MODULE
-
#if 0 /* not safe to unload */
-void cleanup_module(void)
+static int __exit amiga_floppy_remove(struct platform_device *pdev)
{
int i;
custom.dmacon = DMAF_DISK; /* disable DMA */
amiga_chip_free(raw_buf);
blk_cleanup_queue(floppy_queue);
- release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#endif
-#else
+static struct platform_driver amiga_floppy_driver = {
+ .driver = {
+ .name = "amiga-floppy",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_floppy_init(void)
+{
+ return platform_driver_probe(&amiga_floppy_driver, amiga_floppy_probe);
+}
+
+module_init(amiga_floppy_init);
+
+#ifndef MODULE
static int __init amiga_floppy_setup (char *str)
{
int n;
__setup("floppy=", amiga_floppy_setup);
#endif
+
+MODULE_ALIAS("platform:amiga-floppy");
char data;
int char_count;
int save_cnt;
- int len;
/* determine the channel and change to that context */
channel = (u_short) (base_addr[CyLICR] >> 2);
cy_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
- unsigned long val;
struct cyclades_port *info = tty->driver_data;
int ret_val = 0;
void __user *argp = (void __user *)arg;
dmabuf = (unsigned *)tmpv;
}
+ flush_kernel_dcache_page(sg_page(sg));
kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
- dmac_flush_range((void *)sgbuffer, ((void *)sgbuffer) + amount);
data->bytes_xfered += amount;
if (size == 0)
break;
{ ZORRO_PROD_AMERISTAR_A2065 },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
static struct zorro_driver a2065_driver = {
.name = "a2065",
{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
static struct zorro_driver ariadne_driver = {
.name = "ariadne",
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
static struct zorro_driver hydra_driver = {
.name = "hydra",
{ ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
static struct zorro_driver zorro8390_driver = {
.name = "zorro8390",
},
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
static int __devinit zorro7xx_init_one(struct zorro_dev *z,
const struct zorro_device_id *ent)
mutex_lock(&inode->i_mutex);
dentry_unhash(dentry);
if (usbfs_empty(dentry)) {
+ dont_mount(dentry);
drop_nlink(dentry->d_inode);
drop_nlink(dentry->d_inode);
dput(dentry);
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
-
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
+
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
* Interface to the low level console driver
*/
-static void amifb_deinit(void);
+static void amifb_deinit(struct platform_device *pdev);
/*
* Internal routines
* Initialisation
*/
-static int __init amifb_init(void)
+static int __init amifb_probe(struct platform_device *pdev)
{
int tag, i, err = 0;
u_long chipptr;
}
amifb_setup(option);
#endif
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_VIDEO))
- return -ENODEV;
-
- /*
- * We request all registers starting from bplpt[0]
- */
- if (!request_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120,
- "amifb [Denise/Lisa]"))
- return -EBUSY;
-
custom.dmacon = DMAF_ALL | DMAF_MASTER;
switch (amiga_chipset) {
fb_info.fbops = &amifb_ops;
fb_info.par = &currentpar;
fb_info.flags = FBINFO_DEFAULT;
+ fb_info.device = &pdev->dev;
if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
return 0;
amifb_error:
- amifb_deinit();
+ amifb_deinit(pdev);
return err;
}
-static void amifb_deinit(void)
+static void amifb_deinit(struct platform_device *pdev)
{
if (fb_info.cmap.len)
fb_dealloc_cmap(&fb_info.cmap);
+ fb_dealloc_cmap(&fb_info.cmap);
chipfree();
if (videomemory)
iounmap((void*)videomemory);
- release_mem_region(CUSTOM_PHYSADDR+0xe0, 0x120);
custom.dmacon = DMAF_ALL | DMAF_MASTER;
}
}
}
-static void __exit amifb_exit(void)
+static int __exit amifb_remove(struct platform_device *pdev)
{
unregister_framebuffer(&fb_info);
- amifb_deinit();
+ amifb_deinit(pdev);
amifb_video_off();
+ return 0;
+}
+
+static struct platform_driver amifb_driver = {
+ .remove = __exit_p(amifb_remove),
+ .driver = {
+ .name = "amiga-video",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amifb_init(void)
+{
+ return platform_driver_probe(&amifb_driver, amifb_probe);
}
module_init(amifb_init);
+
+static void __exit amifb_exit(void)
+{
+ platform_driver_unregister(&amifb_driver);
+}
+
module_exit(amifb_exit);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-video");
},
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
static const struct {
zorro_id id2;
{ ZORRO_PROD_HELFRICH_RAINBOW_II },
{ 0 }
};
+MODULE_DEVICE_TABLE(zorro, fm2fb_devices);
static struct zorro_driver fm2fb_driver = {
.name = "fm2fb",
config MPCORE_WATCHDOG
tristate "MPcore watchdog"
- depends on ARM_MPCORE_PLATFORM && LOCAL_TIMERS
+ depends on HAVE_ARM_TWD
help
Watchdog timer embedded into the MPcore system.
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/io.h>
-#include <asm/hardware/arm_twd.h>
+#include <asm/smp_twd.h>
struct mpcore_wdt {
unsigned long timer_alive;
};
static struct platform_device *mpcore_wdt_dev;
-extern unsigned int mpcore_timer_rate;
+static DEFINE_SPINLOCK(wdt_lock);
#define TIMER_MARGIN 60
static int mpcore_margin = TIMER_MARGIN;
*/
static void mpcore_wdt_keepalive(struct mpcore_wdt *wdt)
{
- unsigned int count;
+ unsigned long count;
+ spin_lock(&wdt_lock);
/* Assume prescale is set to 256 */
- count = (mpcore_timer_rate / 256) * mpcore_margin;
+ count = __raw_readl(wdt->base + TWD_WDOG_COUNTER);
+ count = (0xFFFFFFFFU - count) * (HZ / 5);
+ count = (count / 256) * mpcore_margin;
/* Reload the counter */
- spin_lock(&wdt_lock);
writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD);
wdt->perturb = wdt->perturb ? 0 : 1;
spin_unlock(&wdt_lock);
{
dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n");
- spin_lock(&wdt_lock);
/* This loads the count register but does NOT start the count yet */
mpcore_wdt_keepalive(wdt);
/* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */
writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL);
}
- spin_unlock(&wdt_lock);
}
static int mpcore_wdt_set_heartbeat(int t)
mpcore_wdt_miscdev.parent = &dev->dev;
ret = misc_register(&mpcore_wdt_miscdev);
if (ret) {
- dev_printk(KERN_ERR, _dev,
+ dev_printk(KERN_ERR, wdt->dev,
"cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
goto err_misc;
ret = request_irq(wdt->irq, mpcore_wdt_fire, IRQF_DISABLED,
"mpcore_wdt", wdt);
if (ret) {
- dev_printk(KERN_ERR, _dev,
+ dev_printk(KERN_ERR, wdt->dev,
"cannot register IRQ%d for watchdog\n", wdt->irq);
goto err_irq;
}
mpcore_wdt_stop(wdt);
- platform_set_drvdata(&dev->dev, wdt);
+ platform_set_drvdata(dev, wdt);
mpcore_wdt_dev = dev;
return 0;
static int zorro_seq_show(struct seq_file *m, void *v)
{
- u_int slot = *(loff_t *)v;
+ unsigned int slot = *(loff_t *)v;
struct zorro_dev *z = &zorro_autocon[slot];
seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
static struct proc_dir_entry *proc_bus_zorro_dir;
-static int __init zorro_proc_attach_device(u_int slot)
+static int __init zorro_proc_attach_device(unsigned int slot)
{
struct proc_dir_entry *entry;
char name[4];
static int __init zorro_proc_init(void)
{
- u_int slot;
+ unsigned int slot;
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
return 0;
}
+static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+#ifdef CONFIG_HOTPLUG
+ struct zorro_dev *z;
+
+ if (!dev)
+ return -ENODEV;
+
+ z = to_zorro_dev(dev);
+ if (!z)
+ return -ENODEV;
+
+ if (add_uevent_var(env, "ZORRO_ID=%08X", z->id) ||
+ add_uevent_var(env, "ZORRO_SLOT_NAME=%s", dev_name(dev)) ||
+ add_uevent_var(env, "ZORRO_SLOT_ADDR=%04X", z->slotaddr) ||
+ add_uevent_var(env, "MODALIAS=" ZORRO_DEVICE_MODALIAS_FMT, z->id))
+ return -ENOMEM;
+
+ return 0;
+#else /* !CONFIG_HOTPLUG */
+ return -ENODEV;
+#endif /* !CONFIG_HOTPLUG */
+}
struct bus_type zorro_bus_type = {
.name = "zorro",
.match = zorro_bus_match,
+ .uevent = zorro_uevent,
.probe = zorro_device_probe,
.remove = zorro_device_remove,
};
.read = zorro_read_config,
};
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct zorro_dev *z = to_zorro_dev(dev);
+
+ return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
+}
+
+static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
+
int zorro_create_sysfs_dev_files(struct zorro_dev *z)
{
struct device *dev = &z->dev;
(error = device_create_file(dev, &dev_attr_slotaddr)) ||
(error = device_create_file(dev, &dev_attr_slotsize)) ||
(error = device_create_file(dev, &dev_attr_resource)) ||
+ (error = device_create_file(dev, &dev_attr_modalias)) ||
(error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
return error;
#include <linux/zorro.h>
#include <linux/bitops.h>
#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
* Zorro Expansion Devices
*/
-u_int zorro_num_autocon = 0;
+unsigned int zorro_num_autocon;
struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
/*
- * Single Zorro bus
+ * Zorro bus
*/
-struct zorro_bus zorro_bus = {\
- .resources = {
- /* Zorro II regions (on Zorro II/III) */
- { .name = "Zorro II exp", .start = 0x00e80000, .end = 0x00efffff },
- { .name = "Zorro II mem", .start = 0x00200000, .end = 0x009fffff },
- /* Zorro III regions (on Zorro III only) */
- { .name = "Zorro III exp", .start = 0xff000000, .end = 0xffffffff },
- { .name = "Zorro III cfg", .start = 0x40000000, .end = 0x7fffffff }
- },
- .name = "Zorro bus"
+struct zorro_bus {
+ struct list_head devices; /* list of devices on this bus */
+ struct device dev;
};
struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from)
{
- struct zorro_dev *z;
+ struct zorro_dev *z;
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
- return NULL;
+ if (!zorro_num_autocon)
+ return NULL;
- for (z = from ? from+1 : &zorro_autocon[0];
- z < zorro_autocon+zorro_num_autocon;
- z++)
- if (id == ZORRO_WILDCARD || id == z->id)
- return z;
- return NULL;
+ for (z = from ? from+1 : &zorro_autocon[0];
+ z < zorro_autocon+zorro_num_autocon;
+ z++)
+ if (id == ZORRO_WILDCARD || id == z->id)
+ return z;
+ return NULL;
}
+EXPORT_SYMBOL(zorro_find_device);
/*
*/
DECLARE_BITMAP(zorro_unused_z2ram, 128);
+EXPORT_SYMBOL(zorro_unused_z2ram);
static void __init mark_region(unsigned long start, unsigned long end,
int flag)
{
- if (flag)
- start += Z2RAM_CHUNKMASK;
- else
- end += Z2RAM_CHUNKMASK;
- start &= ~Z2RAM_CHUNKMASK;
- end &= ~Z2RAM_CHUNKMASK;
-
- if (end <= Z2RAM_START || start >= Z2RAM_END)
- return;
- start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
- end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
- while (start < end) {
- u32 chunk = start>>Z2RAM_CHUNKSHIFT;
if (flag)
- set_bit(chunk, zorro_unused_z2ram);
+ start += Z2RAM_CHUNKMASK;
else
- clear_bit(chunk, zorro_unused_z2ram);
- start += Z2RAM_CHUNKSIZE;
- }
+ end += Z2RAM_CHUNKMASK;
+ start &= ~Z2RAM_CHUNKMASK;
+ end &= ~Z2RAM_CHUNKMASK;
+
+ if (end <= Z2RAM_START || start >= Z2RAM_END)
+ return;
+ start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START;
+ end = end > Z2RAM_END ? Z2RAM_SIZE : end-Z2RAM_START;
+ while (start < end) {
+ u32 chunk = start>>Z2RAM_CHUNKSHIFT;
+ if (flag)
+ set_bit(chunk, zorro_unused_z2ram);
+ else
+ clear_bit(chunk, zorro_unused_z2ram);
+ start += Z2RAM_CHUNKSIZE;
+ }
}
-static struct resource __init *zorro_find_parent_resource(struct zorro_dev *z)
+static struct resource __init *zorro_find_parent_resource(
+ struct platform_device *bridge, struct zorro_dev *z)
{
- int i;
+ int i;
- for (i = 0; i < zorro_bus.num_resources; i++)
- if (zorro_resource_start(z) >= zorro_bus.resources[i].start &&
- zorro_resource_end(z) <= zorro_bus.resources[i].end)
- return &zorro_bus.resources[i];
- return &iomem_resource;
+ for (i = 0; i < bridge->num_resources; i++) {
+ struct resource *r = &bridge->resource[i];
+ if (zorro_resource_start(z) >= r->start &&
+ zorro_resource_end(z) <= r->end)
+ return r;
+ }
+ return &iomem_resource;
}
- /*
- * Initialization
- */
-static int __init zorro_init(void)
+static int __init amiga_zorro_probe(struct platform_device *pdev)
{
- struct zorro_dev *z;
- unsigned int i;
- int error;
-
- if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(ZORRO))
- return 0;
-
- pr_info("Zorro: Probing AutoConfig expansion devices: %d device%s\n",
- zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
-
- /* Initialize the Zorro bus */
- INIT_LIST_HEAD(&zorro_bus.devices);
- dev_set_name(&zorro_bus.dev, "zorro");
- error = device_register(&zorro_bus.dev);
- if (error) {
- pr_err("Zorro: Error registering zorro_bus\n");
- return error;
- }
-
- /* Request the resources */
- zorro_bus.num_resources = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
- for (i = 0; i < zorro_bus.num_resources; i++)
- request_resource(&iomem_resource, &zorro_bus.resources[i]);
-
- /* Register all devices */
- for (i = 0; i < zorro_num_autocon; i++) {
- z = &zorro_autocon[i];
- z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
- if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
- /* GVP quirk */
- unsigned long magic = zorro_resource_start(z)+0x8000;
- z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
- }
- sprintf(z->name, "Zorro device %08x", z->id);
- zorro_name_device(z);
- z->resource.name = z->name;
- if (request_resource(zorro_find_parent_resource(z), &z->resource))
- pr_err("Zorro: Address space collision on device %s %pR\n",
- z->name, &z->resource);
- dev_set_name(&z->dev, "%02x", i);
- z->dev.parent = &zorro_bus.dev;
- z->dev.bus = &zorro_bus_type;
- error = device_register(&z->dev);
+ struct zorro_bus *bus;
+ struct zorro_dev *z;
+ struct resource *r;
+ unsigned int i;
+ int error;
+
+ /* Initialize the Zorro bus */
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&bus->devices);
+ bus->dev.parent = &pdev->dev;
+ dev_set_name(&bus->dev, "zorro");
+ error = device_register(&bus->dev);
if (error) {
- pr_err("Zorro: Error registering device %s\n", z->name);
- continue;
+ pr_err("Zorro: Error registering zorro_bus\n");
+ kfree(bus);
+ return error;
}
- error = zorro_create_sysfs_dev_files(z);
- if (error)
- dev_err(&z->dev, "Error creating sysfs files\n");
- }
-
- /* Mark all available Zorro II memory */
- zorro_for_each_dev(z) {
- if (z->rom.er_Type & ERTF_MEMLIST)
- mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1);
- }
-
- /* Unmark all used Zorro II memory */
- for (i = 0; i < m68k_num_memory; i++)
- if (m68k_memory[i].addr < 16*1024*1024)
- mark_region(m68k_memory[i].addr,
- m68k_memory[i].addr+m68k_memory[i].size, 0);
-
- return 0;
+ platform_set_drvdata(pdev, bus);
+
+ /* Register all devices */
+ pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n",
+ zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s");
+
+ for (i = 0; i < zorro_num_autocon; i++) {
+ z = &zorro_autocon[i];
+ z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
+ if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
+ /* GVP quirk */
+ unsigned long magic = zorro_resource_start(z)+0x8000;
+ z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
+ }
+ sprintf(z->name, "Zorro device %08x", z->id);
+ zorro_name_device(z);
+ z->resource.name = z->name;
+ r = zorro_find_parent_resource(pdev, z);
+ error = request_resource(r, &z->resource);
+ if (error)
+ dev_err(&bus->dev,
+ "Address space collision on device %s %pR\n",
+ z->name, &z->resource);
+ dev_set_name(&z->dev, "%02x", i);
+ z->dev.parent = &bus->dev;
+ z->dev.bus = &zorro_bus_type;
+ error = device_register(&z->dev);
+ if (error) {
+ dev_err(&bus->dev, "Error registering device %s\n",
+ z->name);
+ continue;
+ }
+ error = zorro_create_sysfs_dev_files(z);
+ if (error)
+ dev_err(&z->dev, "Error creating sysfs files\n");
+ }
+
+ /* Mark all available Zorro II memory */
+ zorro_for_each_dev(z) {
+ if (z->rom.er_Type & ERTF_MEMLIST)
+ mark_region(zorro_resource_start(z),
+ zorro_resource_end(z)+1, 1);
+ }
+
+ /* Unmark all used Zorro II memory */
+ for (i = 0; i < m68k_num_memory; i++)
+ if (m68k_memory[i].addr < 16*1024*1024)
+ mark_region(m68k_memory[i].addr,
+ m68k_memory[i].addr+m68k_memory[i].size,
+ 0);
+
+ return 0;
}
-subsys_initcall(zorro_init);
+static struct platform_driver amiga_zorro_driver = {
+ .driver = {
+ .name = "amiga-zorro",
+ .owner = THIS_MODULE,
+ },
+};
-EXPORT_SYMBOL(zorro_find_device);
-EXPORT_SYMBOL(zorro_unused_z2ram);
+static int __init amiga_zorro_init(void)
+{
+ return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe);
+}
+
+module_init(amiga_zorro_init);
MODULE_LICENSE("GPL");
ret = -EBADF;
goto out_drop_write;
}
+
src = src_file->f_dentry->d_inode;
ret = -EINVAL;
if (src == inode)
goto out_fput;
+ /* the src must be open for reading */
+ if (!(src_file->f_mode & FMODE_READ))
+ goto out_fput;
+
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
configfs_detach_group(sd->s_element);
child->d_inode->i_flags |= S_DEAD;
+ dont_mount(child);
mutex_unlock(&child->d_inode->i_mutex);
mutex_lock(&dentry->d_inode->i_mutex);
configfs_remove_dir(item);
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
d_delete(dentry);
}
if (ret) {
configfs_detach_item(item);
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
}
configfs_adjust_dir_dirent_depth_after_populate(sd);
mutex_unlock(&dentry->d_inode->i_mutex);
mutex_unlock(&configfs_symlink_mutex);
configfs_detach_group(&group->cg_item);
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
d_delete(dentry);
/* initialize the mount flag and determine the default error handler */
flag = JFS_ERR_REMOUNT_RO;
- if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
- kfree(sbi);
- return -EINVAL;
- }
+ if (!parse_options((char *) data, sb, &newLVSize, &flag))
+ goto out_kfree;
sbi->flag = flag;
#ifdef CONFIG_JFS_POSIX_ACL
if (newLVSize) {
printk(KERN_ERR "resize option for remount only\n");
- return -EINVAL;
+ goto out_kfree;
}
/*
inode = new_inode(sb);
if (inode == NULL) {
ret = -ENOMEM;
- goto out_kfree;
+ goto out_unload;
}
inode->i_ino = 0;
inode->i_nlink = 1;
make_bad_inode(sbi->direct_inode);
iput(sbi->direct_inode);
sbi->direct_inode = NULL;
-out_kfree:
+out_unload:
if (sbi->nls_tab)
unload_nls(sbi->nls_tab);
+out_kfree:
kfree(sbi);
return ret;
}
close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
}
+static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
+{
+ return 0;
+}
+
static const struct logfs_device_ops bd_devops = {
.find_first_sb = bdev_find_first_sb,
.find_last_sb = bdev_find_last_sb,
.readpage = bdev_readpage,
.writeseg = bdev_writeseg,
.erase = bdev_erase,
+ .can_write_buf = bdev_can_write_buf,
.sync = bdev_sync,
.put_device = bdev_put_device,
};
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
page_address(page));
- if (err == -EUCLEAN) {
+ if (err == -EUCLEAN || err == -EBADMSG) {
+ /* -EBADMSG happens regularly on power failures */
err = 0;
/* FIXME: force GC this segment */
}
put_mtd_device(logfs_super(sb)->s_mtd);
}
+static int mtd_can_write_buf(struct super_block *sb, u64 ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ void *buf;
+ int err;
+
+ buf = kmalloc(super->s_writesize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = mtd_read(sb, ofs, super->s_writesize, buf);
+ if (err)
+ goto out;
+ if (memchr_inv(buf, 0xff, super->s_writesize))
+ err = -EIO;
+ kfree(buf);
+out:
+ return err;
+}
+
static const struct logfs_device_ops mtd_devops = {
.find_first_sb = mtd_find_first_sb,
.find_last_sb = mtd_find_last_sb,
.readpage = mtd_readpage,
.writeseg = mtd_writeseg,
.erase = mtd_erase,
+ .can_write_buf = mtd_can_write_buf,
.sync = mtd_sync,
.put_device = mtd_put_device,
};
const struct logfs_device_ops *devops = &mtd_devops;
mtd = get_mtd_device(NULL, mtdnr);
+ if (IS_ERR(mtd))
+ return PTR_ERR(mtd);
return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt);
}
static void logfs_invalidatepage(struct page *page, unsigned long offset)
{
- move_page_to_btree(page);
+ struct logfs_block *block = logfs_block(page);
+
+ if (block->reserved_bytes) {
+ struct super_block *sb = page->mapping->host->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ super->s_dirty_pages -= block->reserved_bytes;
+ block->ops->free_block(sb, block);
+ BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR));
+ } else
+ move_page_to_btree(page);
BUG_ON(PagePrivate(page) || page->private);
}
int logfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
struct super_block *sb = dentry->d_inode->i_sb;
- struct logfs_super *super = logfs_super(sb);
- /* FIXME: write anchor */
- super->s_devops->sync(sb);
+ logfs_write_anchor(sb);
return 0;
}
logfs_safe_iput(inode, cookie);
}
-static u32 logfs_gc_segment(struct super_block *sb, u32 segno, u8 dist)
+static u32 logfs_gc_segment(struct super_block *sb, u32 segno)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_segment_header sh;
segno, (u64)segno << super->s_segshift,
dist, no_free_segments(sb), valid,
super->s_free_bytes);
- cleaned = logfs_gc_segment(sb, segno, dist);
+ cleaned = logfs_gc_segment(sb, segno);
log_gc("GC segment #%02x complete - now %x valid\n", segno,
valid - cleaned);
BUG_ON(cleaned != valid);
{
struct logfs_super *super = logfs_super(sb);
struct logfs_area *area = super->s_area[i];
- struct logfs_object_header oh;
+ gc_level_t gc_level;
+ u32 cleaned, valid, ec;
u32 segno = area->a_segno;
- u32 ofs = area->a_used_bytes;
- __be32 crc;
- int err;
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
if (!area->a_is_open)
return 0;
- for (ofs = area->a_used_bytes;
- ofs <= super->s_segsize - sizeof(oh);
- ofs += (u32)be16_to_cpu(oh.len) + sizeof(oh)) {
- err = wbuf_read(sb, dev_ofs(sb, segno, ofs), sizeof(oh), &oh);
- if (err)
- return err;
-
- if (!memchr_inv(&oh, 0xff, sizeof(oh)))
- break;
+ if (super->s_devops->can_write_buf(sb, ofs) == 0)
+ return 0;
- crc = logfs_crc32(&oh, sizeof(oh) - 4, 4);
- if (crc != oh.crc) {
- printk(KERN_INFO "interrupted header at %llx\n",
- dev_ofs(sb, segno, ofs));
- return 0;
- }
- }
- if (ofs != area->a_used_bytes) {
- printk(KERN_INFO "%x bytes unaccounted data found at %llx\n",
- ofs - area->a_used_bytes,
- dev_ofs(sb, segno, area->a_used_bytes));
- area->a_used_bytes = ofs;
- }
+ printk(KERN_INFO "LogFS: Possibly incomplete write at %llx\n", ofs);
+ /*
+ * The device cannot write back the write buffer. Most likely the
+ * wbuf was already written out and the system crashed at some point
+ * before the journal commit happened. In that case we wouldn't have
+ * to do anything. But if the crash happened before the wbuf was
+ * written out correctly, we must GC this segment. So assume the
+ * worst and always do the GC run.
+ */
+ area->a_is_open = 0;
+ valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
+ cleaned = logfs_gc_segment(sb, segno);
+ if (cleaned != valid)
+ return -EIO;
return 0;
}
inode->i_ctime = CURRENT_TIME;
inode->i_mtime = CURRENT_TIME;
inode->i_nlink = 1;
+ li->li_refcount = 1;
INIT_LIST_HEAD(&li->li_freeing_list);
for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
u64 ino;
mutex_lock(&super->s_journal_mutex);
- ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino);
+ ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1);
super->s_last_ino = ino;
super->s_inos_till_wrap--;
if (super->s_inos_till_wrap < 0) {
static int logfs_sync_fs(struct super_block *sb, int wait)
{
- /* FIXME: write anchor */
- logfs_super(sb)->s_devops->sync(sb);
+ logfs_write_anchor(sb);
return 0;
}
ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
if (super->s_writesize > 1)
- logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
+ return logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
else
- logfs_buf_recover(area, ofs, NULL, 0);
- return 0;
+ return logfs_buf_recover(area, ofs, NULL, 0);
}
static void *unpack(void *from, void *to)
read_erasecount(sb, unpack(jh, scratch));
break;
case JE_AREA:
- read_area(sb, unpack(jh, scratch));
+ err = read_area(sb, unpack(jh, scratch));
break;
case JE_OBJ_ALIAS:
err = logfs_load_object_aliases(sb, unpack(jh, scratch),
* @erase: erase one segment
* @read: read from the device
* @erase: erase part of the device
+ * @can_write_buf: decide whether wbuf can be written to ofs
*/
struct logfs_device_ops {
struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs);
void (*writeseg)(struct super_block *sb, u64 ofs, size_t len);
int (*erase)(struct super_block *sb, loff_t ofs, size_t len,
int ensure_write);
+ int (*can_write_buf)(struct super_block *sb, u64 ofs);
void (*sync)(struct super_block *sb);
void (*put_device)(struct super_block *sb);
};
int s_lock_count;
mempool_t *s_block_pool; /* struct logfs_block pool */
mempool_t *s_shadow_pool; /* struct logfs_shadow pool */
+ struct list_head s_writeback_list; /* writeback pages */
/*
* Space accounting:
* - s_used_bytes specifies space used to store valid data objects.
int logfs_init_areas(struct super_block *sb);
void logfs_cleanup_areas(struct super_block *sb);
int logfs_open_area(struct logfs_area *area, size_t bytes);
-void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
int use_filler);
-static inline void logfs_buf_write(struct logfs_area *area, u64 ofs,
+static inline int logfs_buf_write(struct logfs_area *area, u64 ofs,
void *buf, size_t len)
{
- __logfs_buf_write(area, ofs, buf, len, 0);
+ return __logfs_buf_write(area, ofs, buf, len, 0);
}
-static inline void logfs_buf_recover(struct logfs_area *area, u64 ofs,
+static inline int logfs_buf_recover(struct logfs_area *area, u64 ofs,
void *buf, size_t len)
{
- __logfs_buf_write(area, ofs, buf, len, 1);
+ return __logfs_buf_write(area, ofs, buf, len, 1);
}
/* super.c */
return bix;
else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED)
bix = maxbix(li->li_height);
+ else if (bix >= maxbix(li->li_height))
+ return bix;
else {
bix = seek_holedata_loop(inode, bix, 0);
if (bix < maxbix(li->li_height))
int get_page_reserve(struct inode *inode, struct page *page)
{
struct logfs_super *super = logfs_super(inode->i_sb);
+ struct logfs_block *block = logfs_block(page);
int ret;
- if (logfs_block(page) && logfs_block(page)->reserved_bytes)
+ if (block && block->reserved_bytes)
return 0;
logfs_get_wblocks(inode->i_sb, page, WF_LOCK);
- ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE);
+ while ((ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE)) &&
+ !list_empty(&super->s_writeback_list)) {
+ block = list_entry(super->s_writeback_list.next,
+ struct logfs_block, alias_list);
+ block->ops->write_block(block);
+ }
if (!ret) {
alloc_data_block(inode, page);
- logfs_block(page)->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
+ block = logfs_block(page);
+ block->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE;
+ list_move_tail(&block->alias_list, &super->s_writeback_list);
}
logfs_put_wblocks(inode->i_sb, page, WF_LOCK);
return ret;
size = target;
logfs_get_wblocks(sb, NULL, 1);
- err = __logfs_truncate(inode, target);
+ err = __logfs_truncate(inode, size);
if (!err)
err = __logfs_write_inode(inode, 0);
logfs_put_wblocks(sb, NULL, 1);
int min_fill = 3 * super->s_no_blocks;
INIT_LIST_HEAD(&super->s_object_alias);
+ INIT_LIST_HEAD(&super->s_writeback_list);
mutex_init(&super->s_write_mutex);
super->s_block_pool = mempool_create_kmalloc_pool(min_fill,
sizeof(struct logfs_block));
return page;
}
-void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
int use_filler)
{
pgoff_t index = ofs >> PAGE_SHIFT;
copylen = min((ulong)len, PAGE_SIZE - offset);
page = get_mapping_page(area->a_sb, index, use_filler);
- SetPageUptodate(page);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
BUG_ON(!page); /* FIXME: reserve a pool */
+ SetPageUptodate(page);
memcpy(page_address(page) + offset, buf, copylen);
SetPagePrivate(page);
page_cache_release(page);
offset = 0;
index++;
} while (len);
+ return 0;
}
static void pad_partial_page(struct logfs_area *area)
sb->s_fs_info = super;
sb->s_mtd = super->s_mtd;
sb->s_bdev = super->s_bdev;
+#ifdef CONFIG_BLOCK
if (sb->s_bdev)
sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+#endif
+#ifdef CONFIG_MTD
if (sb->s_mtd)
sb->s_bdi = sb->s_mtd->backing_dev_info;
+#endif
return 0;
}
goto fail;
sb->s_root = d_alloc_root(rootdir);
- if (!sb->s_root)
- goto fail2;
+ if (!sb->s_root) {
+ iput(rootdir);
+ goto fail;
+ }
super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
if (!super->s_erase_page)
- goto fail2;
+ goto fail;
memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE);
/* FIXME: check for read-only mounts */
err = logfs_make_writeable(sb);
if (err)
- goto fail3;
+ goto fail1;
log_super("LogFS: Finished mounting\n");
simple_set_mnt(mnt, sb);
return 0;
-fail3:
+fail1:
__free_page(super->s_erase_page);
-fail2:
- iput(rootdir);
fail:
iput(logfs_super(sb)->s_master_inode);
return -EIO;
if (!first || IS_ERR(first))
return NULL;
last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
- if (!last || IS_ERR(first)) {
+ if (!last || IS_ERR(last)) {
page_cache_release(first);
return NULL;
}
page = find_super_block(sb);
if (!page)
- return -EIO;
+ return -EINVAL;
ds = page_address(page);
super->s_size = be64_to_cpu(ds->ds_filesystem_size);
error = security_inode_rmdir(dir, dentry);
if (!error) {
error = dir->i_op->rmdir(dir, dentry);
- if (!error)
+ if (!error) {
dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
+ }
}
}
mutex_unlock(&dentry->d_inode->i_mutex);
if (!error) {
error = dir->i_op->unlink(dir, dentry);
if (!error)
- dentry->d_inode->i_flags |= S_DEAD;
+ dont_mount(dentry);
}
}
mutex_unlock(&dentry->d_inode->i_mutex);
return error;
target = new_dentry->d_inode;
- if (target) {
+ if (target)
mutex_lock(&target->i_mutex);
- dentry_unhash(new_dentry);
- }
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
error = -EBUSY;
- else
+ else {
+ if (target)
+ dentry_unhash(new_dentry);
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+ }
if (target) {
- if (!error)
+ if (!error) {
target->i_flags |= S_DEAD;
+ dont_mount(new_dentry);
+ }
mutex_unlock(&target->i_mutex);
if (d_unhashed(new_dentry))
d_rehash(new_dentry);
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (!error) {
if (target)
- target->i_flags |= S_DEAD;
+ dont_mount(new_dentry);
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
d_move(old_dentry, new_dentry);
}
err = -ENOENT;
mutex_lock(&path->dentry->d_inode->i_mutex);
- if (IS_DEADDIR(path->dentry->d_inode))
+ if (cant_mount(path->dentry))
goto out_unlock;
err = security_sb_check_sb(mnt, path);
err = -ENOENT;
mutex_lock(&path->dentry->d_inode->i_mutex);
- if (IS_DEADDIR(path->dentry->d_inode))
+ if (cant_mount(path->dentry))
goto out1;
if (d_unlinked(path->dentry))
if (!check_mnt(root.mnt))
goto out2;
error = -ENOENT;
- if (IS_DEADDIR(new.dentry->d_inode))
+ if (cant_mount(old.dentry))
goto out2;
if (d_unlinked(new.dentry))
goto out2;
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
+#include <linux/sched.h>
#include "inotify.h"
idr_for_each(&group->inotify_data.idr, idr_callback, group);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
+ free_uid(group->inotify_data.user);
}
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
goto out_err;
+ /* we are putting the mark on the idr, take a reference */
+ fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
group->inotify_data.last_wd+1,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
+ /* we didn't get on the idr, drop the idr reference */
+ fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
/* idr was out of memory, allocate and try again */
if (ret == -EAGAIN)
goto retry;
goto out_err;
}
- /* we put the mark on the idr, take a reference */
- fsnotify_get_mark(&tmp_ientry->fsn_entry);
-
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
if (ret) {
/* return the watch descriptor for this new entry */
ret = tmp_ientry->wd;
- /* match the ref from fsnotify_init_markentry() */
- fsnotify_put_mark(&tmp_ientry->fsn_entry);
-
/* if this mark added a new event update the group mask */
if (mask & ~group->mask)
fsnotify_recalc_group_mask(group);
out_err:
- if (ret < 0)
- kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
+ /* match the ref from fsnotify_init_markentry() */
+ fsnotify_put_mark(&tmp_ientry->fsn_entry);
return ret;
}
name, de->name))
goto found;
}
+ dir_put_page(page);
}
- dir_put_page(page);
if (++n >= npages)
n = 0;
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
/**
* atomic_set - set atomic variable
#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
+#define DCACHE_CANT_MOUNT 0x0100
+
extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;
return d_unhashed(dentry) && !IS_ROOT(dentry);
}
+static inline int cant_mount(struct dentry *dentry)
+{
+ return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_CANT_MOUNT;
+ spin_unlock(&dentry->d_lock);
+}
+
static inline struct dentry *dget_parent(struct dentry *dentry)
{
struct dentry *ret;
IFLA_NET_NS_PID,
IFLA_IFALIAS,
IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */
- IFLA_VF_MAC, /* Hardware queue specific attributes */
- IFLA_VF_VLAN,
- IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */
- IFLA_VFINFO,
+ IFLA_VFINFO_LIST,
__IFLA_MAX
};
/* SR-IOV virtual function management section */
+enum {
+ IFLA_VF_INFO_UNSPEC,
+ IFLA_VF_INFO,
+ __IFLA_VF_INFO_MAX,
+};
+
+#define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1)
+
+enum {
+ IFLA_VF_UNSPEC,
+ IFLA_VF_MAC, /* Hardware queue specific attributes */
+ IFLA_VF_VLAN,
+ IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */
+ __IFLA_VF_MAX,
+};
+
+#define IFLA_VF_MAX (__IFLA_VF_MAX - 1)
+
struct ifla_vf_mac {
__u32 vf;
__u8 mac[32]; /* MAX_ADDR_LEN */
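With this change the per-VF attributes are carried inside a two-level nest, so one link message can describe every VF of a PF. As an illustration only (the layout is implied by the enums above and by the rtnl_fill_ifinfo() changes further down, it is not spelled out in the header), a message carrying VF configuration nests as:

IFLA_VFINFO_LIST (nested)
  IFLA_VF_INFO (nested, one per VF)
    IFLA_VF_MAC       (struct ifla_vf_mac)
    IFLA_VF_VLAN      (struct ifla_vf_vlan)
    IFLA_VF_TX_RATE   (struct ifla_vf_tx_rate)
  IFLA_VF_INFO (nested)
    ...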
__attribute__((aligned(sizeof(kernel_ulong_t))));
};
+struct zorro_device_id {
+ __u32 id; /* Device ID or ZORRO_WILDCARD */
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+#define ZORRO_WILDCARD (0xffffffff) /* not official */
+
+#define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X"
+
#endif /* LINUX_MOD_DEVICETABLE_H */
extern int platform_add_devices(struct platform_device **, int);
extern struct platform_device *platform_device_register_simple(const char *, int id,
- struct resource *, unsigned int);
+ const struct resource *, unsigned int);
extern struct platform_device *platform_device_register_data(struct device *,
const char *, int, const void *, size_t);
extern struct platform_device *platform_device_alloc(const char *name, int id);
-extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
+extern int platform_device_add_resources(struct platform_device *pdev,
+ const struct resource *res,
+ unsigned int num);
extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
typedef phys_addr_t resource_size_t;
typedef struct {
- volatile int counter;
+ int counter;
} atomic_t;
#ifdef CONFIG_64BIT
typedef struct {
- volatile long counter;
+ long counter;
} atomic64_t;
#endif
typedef __u32 zorro_id;
-#define ZORRO_WILDCARD (0xffffffff) /* not official */
-
/* Include the ID list */
#include <linux/zorro_ids.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
#include <asm/zorro.h>
* Zorro bus
*/
-struct zorro_bus {
- struct list_head devices; /* list of devices on this bus */
- unsigned int num_resources; /* number of resources */
- struct resource resources[4]; /* address space routed to this bus */
- struct device dev;
- char name[10];
-};
-
-extern struct zorro_bus zorro_bus; /* single Zorro bus */
extern struct bus_type zorro_bus_type;
- /*
- * Zorro device IDs
- */
-
-struct zorro_device_id {
- zorro_id id; /* Device ID or ZORRO_WILDCARD */
- unsigned long driver_data; /* Data private to the driver */
-};
-
-
/*
* Zorro device drivers
*/
extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
extern void tcp_free_md5sig_pool(void);
-extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
-extern void __tcp_put_md5sig_pool(void);
+extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
+extern void tcp_put_md5sig_pool(void);
+
extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
struct tcp_md5sig_key *key);
-static inline
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
-{
- int cpu = get_cpu();
- struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu);
- if (!ret)
- put_cpu();
- return ret;
-}
-
-static inline void tcp_put_md5sig_pool(void)
-{
- __tcp_put_md5sig_pool();
- put_cpu();
-}
-
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
return 0;
prof_buffer = vmalloc(buffer_bytes);
- if (prof_buffer)
+ if (prof_buffer) {
+ memset(prof_buffer, 0, buffer_bytes);
return 0;
+ }
free_cpumask_var(prof_cpu_mask);
return -ENOMEM;
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
struct task_struct *child;
long ret;
- /*
- * This lock_kernel fixes a subtle race with suid exec
- */
- lock_kernel();
if (request == PTRACE_TRACEME) {
ret = ptrace_traceme();
if (!ret)
out_put_task_struct:
put_task_struct(child);
out:
- unlock_kernel();
return ret;
}
struct task_struct *child;
long ret;
- /*
- * This lock_kernel fixes a subtle race with suid exec
- */
- lock_kernel();
if (request == PTRACE_TRACEME) {
ret = ptrace_traceme();
goto out;
out_put_task_struct:
put_task_struct(child);
out:
- unlock_kernel();
return ret;
}
#endif /* CONFIG_COMPAT */
unsigned long *node;
node = mempool_alloc(head->mempool, gfp);
- memset(node, 0, NODESIZE);
+ if (likely(node))
+ memset(node, 0, NODESIZE);
return node;
}
a->tx_compressed = b->tx_compressed;
};
+/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev)
{
- if (dev->dev.parent && dev_is_pci(dev->dev.parent))
- return dev_num_vf(dev->dev.parent) *
- sizeof(struct ifla_vf_info);
- else
+ if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+
+ int num_vfs = dev_num_vf(dev->dev.parent);
+ size_t size = nlmsg_total_size(sizeof(struct nlattr));
+ size += nlmsg_total_size(num_vfs * sizeof(struct nlattr));
+ size += num_vfs * (sizeof(struct ifla_vf_mac) +
+ sizeof(struct ifla_vf_vlan) +
+ sizeof(struct ifla_vf_tx_rate));
+ return size;
+ } else
return 0;
}
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_NUM_VF */
- + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+ + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
+ rtnl_link_get_size(dev); /* IFLA_LINKINFO */
}
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
int i;
- struct ifla_vf_info ivi;
- NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
- for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+ struct nlattr *vfinfo, *vf;
+ int num_vfs = dev_num_vf(dev->dev.parent);
+
+ NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs);
+ vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
+ if (!vfinfo)
+ goto nla_put_failure;
+ for (i = 0; i < num_vfs; i++) {
+ struct ifla_vf_info ivi;
+ struct ifla_vf_mac vf_mac;
+ struct ifla_vf_vlan vf_vlan;
+ struct ifla_vf_tx_rate vf_tx_rate;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
- NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+ vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+ memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+ vf_vlan.vlan = ivi.vlan;
+ vf_vlan.qos = ivi.qos;
+ vf_tx_rate.rate = ivi.tx_rate;
+ vf = nla_nest_start(skb, IFLA_VF_INFO);
+ if (!vf) {
+ nla_nest_cancel(skb, vfinfo);
+ goto nla_put_failure;
+ }
+ NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
+ NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
+ NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate);
+ nla_nest_end(skb, vf);
}
+ nla_nest_end(skb, vfinfo);
}
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
- [IFLA_VF_MAC] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_mac) },
- [IFLA_VF_VLAN] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_vlan) },
- [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
- .len = sizeof(struct ifla_vf_tx_rate) },
+ [IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
};
EXPORT_SYMBOL(ifla_policy);
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
};
+static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+ [IFLA_VF_INFO] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+ [IFLA_VF_MAC] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_mac) },
+ [IFLA_VF_VLAN] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_tx_rate) },
+};
+
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
struct net *net;
return 0;
}
+static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
+{
+ int rem, err = -EINVAL;
+ struct nlattr *vf;
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ nla_for_each_nested(vf, attr, rem) {
+ switch (nla_type(vf)) {
+ case IFLA_VF_MAC: {
+ struct ifla_vf_mac *ivm;
+ ivm = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_mac)
+ err = ops->ndo_set_vf_mac(dev, ivm->vf,
+ ivm->mac);
+ break;
+ }
+ case IFLA_VF_VLAN: {
+ struct ifla_vf_vlan *ivv;
+ ivv = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_vlan)
+ err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+ ivv->vlan,
+ ivv->qos);
+ break;
+ }
+ case IFLA_VF_TX_RATE: {
+ struct ifla_vf_tx_rate *ivt;
+ ivt = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_tx_rate)
+ err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+ ivt->rate);
+ break;
+ }
+ default:
+ err = -EINVAL;
+ break;
+ }
+ if (err)
+ break;
+ }
+ return err;
+}
+
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
write_unlock_bh(&dev_base_lock);
}
- if (tb[IFLA_VF_MAC]) {
- struct ifla_vf_mac *ivm;
- ivm = nla_data(tb[IFLA_VF_MAC]);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_mac)
- err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
- if (err < 0)
- goto errout;
- modified = 1;
- }
-
- if (tb[IFLA_VF_VLAN]) {
- struct ifla_vf_vlan *ivv;
- ivv = nla_data(tb[IFLA_VF_VLAN]);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_vlan)
- err = ops->ndo_set_vf_vlan(dev, ivv->vf,
- ivv->vlan,
- ivv->qos);
- if (err < 0)
- goto errout;
- modified = 1;
- }
- err = 0;
-
- if (tb[IFLA_VF_TX_RATE]) {
- struct ifla_vf_tx_rate *ivt;
- ivt = nla_data(tb[IFLA_VF_TX_RATE]);
- err = -EOPNOTSUPP;
- if (ops->ndo_set_vf_tx_rate)
- err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
- if (err < 0)
- goto errout;
- modified = 1;
+ if (tb[IFLA_VFINFO_LIST]) {
+ struct nlattr *attr;
+ int rem;
+ nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+ if (nla_type(attr) != IFLA_VF_INFO)
+ goto errout;
+ err = do_setvfinfo(dev, attr);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
}
err = 0;
if (p->md5_desc.tfm)
crypto_free_hash(p->md5_desc.tfm);
kfree(p);
- p = NULL;
}
}
free_percpu(pool);
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
-struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+
+/**
+ * tcp_get_md5sig_pool - get md5sig_pool for this user
+ *
+ * We use a per-cpu structure, so on success we return with preemption
+ * and BH disabled, to make sure another thread or a softirq handler
+ * won't try to use the same context.
+ */
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
struct tcp_md5sig_pool * __percpu *p;
- spin_lock_bh(&tcp_md5sig_pool_lock);
+
+ local_bh_disable();
+
+ spin_lock(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)
tcp_md5sig_users++;
- spin_unlock_bh(&tcp_md5sig_pool_lock);
- return (p ? *per_cpu_ptr(p, cpu) : NULL);
-}
+ spin_unlock(&tcp_md5sig_pool_lock);
+
+ if (p)
+ return *per_cpu_ptr(p, smp_processor_id());
-EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+ local_bh_enable();
+ return NULL;
+}
+EXPORT_SYMBOL(tcp_get_md5sig_pool);
-void __tcp_put_md5sig_pool(void)
+void tcp_put_md5sig_pool(void)
{
+ local_bh_enable();
tcp_free_md5sig_pool();
}
-
-EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+EXPORT_SYMBOL(tcp_put_md5sig_pool);
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
struct tcphdr *th)
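Since tcp_get_md5sig_pool() now returns with BH (and therefore preemption) disabled, every successful get must be paired with tcp_put_md5sig_pool() on the same code path. A minimal sketch of that calling pattern, using a hypothetical caller (the function name and error value are illustrative; the real users are the tcp_v4/v6 MD5 hashing helpers):

#include <net/tcp.h>

static int example_md5_hash(struct sk_buff *skb, struct tcphdr *th,
			    struct tcp_md5sig_key *key)
{
	struct tcp_md5sig_pool *hp;
	int err;

	hp = tcp_get_md5sig_pool();	/* on success: BH and preemption disabled */
	if (!hp)
		return -ENOMEM;

	err = tcp_md5_hash_header(hp, th);
	if (!err)
		err = tcp_md5_hash_skb_data(hp, skb, th->doff << 2);
	if (!err)
		err = tcp_md5_hash_key(hp, key);

	tcp_put_md5sig_pool();		/* re-enables BH */
	return err;
}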
del_timer(&transport->T3_rtx_timer))
sctp_transport_put(transport);
+ /* Delete the ICMP proto unreachable timer if it's active. */
+ if (timer_pending(&transport->proto_unreach_timer) &&
+ del_timer(&transport->proto_unreach_timer))
+ sctp_association_put(transport->asoc);
sctp_transport_put(transport);
}
return 1;
}
+/* Looks like: zorro:iN. */
+static int do_zorro_entry(const char *filename, struct zorro_device_id *id,
+ char *alias)
+{
+ id->id = TO_NATIVE(id->id);
+ strcpy(alias, "zorro:");
+ ADD(alias, "i", id->id != ZORRO_WILDCARD, id->id);
+ return 1;
+}
+
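For context, a hedged sketch of the driver-side table that feeds this handler; MODULE_DEVICE_TABLE(zorro, ...) is what generates the __mod_zorro_device_table symbol matched below, and the product id used here is purely hypothetical:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static struct zorro_device_id example_zorro_ids[] = {
	{ 0x12345678 },		/* hypothetical Zorro product id */
	{ 0 }			/* terminator */
};
MODULE_DEVICE_TABLE(zorro, example_zorro_ids);

modpost would then emit a line of the form "alias zorro:i12345678 <module>" into modules.alias, the counterpart of the ZORRO_DEVICE_MODALIAS_FMT string ("zorro:i%08X") added above.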
/* Ignore any prefix, eg. some architectures prepend _ */
static inline int sym_is(const char *symbol, const char *name)
{
do_table(symval, sym->st_size,
sizeof(struct platform_device_id), "platform",
do_platform_entry, mod);
+ else if (sym_is(symname, "__mod_zorro_device_table"))
+ do_table(symval, sym->st_size,
+ sizeof(struct zorro_device_id), "zorro",
+ do_zorro_entry, mod);
free(zeros);
}
#include <linux/ioport.h>
#include <linux/soundcard.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/setup.h>
/*** Config & Setup **********************************************************/
-static int __init dmasound_paula_init(void)
+static int __init amiga_audio_probe(struct platform_device *pdev)
{
- int err;
-
- if (MACH_IS_AMIGA && AMIGAHW_PRESENT(AMI_AUDIO)) {
- if (!request_mem_region(CUSTOM_PHYSADDR+0xa0, 0x40,
- "dmasound [Paula]"))
- return -EBUSY;
- dmasound.mach = machAmiga;
- dmasound.mach.default_hard = def_hard ;
- dmasound.mach.default_soft = def_soft ;
- err = dmasound_init();
- if (err)
- release_mem_region(CUSTOM_PHYSADDR+0xa0, 0x40);
- return err;
- } else
- return -ENODEV;
+ dmasound.mach = machAmiga;
+ dmasound.mach.default_hard = def_hard ;
+ dmasound.mach.default_soft = def_soft ;
+ return dmasound_init();
}
-static void __exit dmasound_paula_cleanup(void)
+static int __exit amiga_audio_remove(struct platform_device *pdev)
{
dmasound_deinit();
- release_mem_region(CUSTOM_PHYSADDR+0xa0, 0x40);
+ return 0;
+}
+
+static struct platform_driver amiga_audio_driver = {
+ .remove = __exit_p(amiga_audio_remove),
+ .driver = {
+ .name = "amiga-audio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init amiga_audio_init(void)
+{
+ return platform_driver_probe(&amiga_audio_driver, amiga_audio_probe);
}
-module_init(dmasound_paula_init);
-module_exit(dmasound_paula_cleanup);
+module_init(amiga_audio_init);
+
+static void __exit amiga_audio_exit(void)
+{
+ platform_driver_unregister(&amiga_audio_driver);
+}
+
+module_exit(amiga_audio_exit);
+
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:amiga-audio");
err = event__synthesize_kernel_mmap(process_synthesized_event,
session, "_text");
+ if (err < 0)
+ err = event__synthesize_kernel_mmap(process_synthesized_event,
+ session, "_stext");
if (err < 0) {
pr_err("Couldn't record kernel reference relocation symbol.\n");
return err;