pass_to_upper_layers(cp->rx_buf);
make_and_setup_new_rx_buf(cp);
} else {
- /* Just sync the buffer and give it back
- * to the card.
+		/* The CPU should not write to a
+		 * DMA_FROM_DEVICE-mapped area, so
+		 * dma_sync_single_for_device() is not
+		 * needed here. It would be required for
+		 * a DMA_BIDIRECTIONAL mapping if the
+		 * memory had been modified.
*/
- dma_sync_single_for_device(&cp->dev,
- cp->rx_dma,
- cp->rx_len,
- DMA_FROM_DEVICE);
give_rx_buf_to_card(cp);
}
}
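
A contrasting sketch, reusing the hypothetical driver helpers from the
example above (modify_packet_header() is made up for illustration): if
the CPU had written to a DMA_BIDIRECTIONAL buffer before handing it
back, the sync would be required:

	modify_packet_header(cp->rx_buf);	/* CPU modified the buffer */
	dma_sync_single_for_device(&cp->dev, cp->rx_dma,
				   cp->rx_len, DMA_BIDIRECTIONAL);
	give_rx_buf_to_card(cp);
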
5.2 stat file
-memory.stat file includes following statistics
+5.2.1 memory.stat file includes the following statistics
# per-memory cgroup local status
cache - # of bytes of page cache memory.
file_mapped is accounted only when the memory cgroup is owner of page
cache.)
+5.2.2 memory.vmscan_stat
+
+memory.vmscan_stat includes statistics for memory scanning, freeing
+and reclaiming. The statistics cover scanning activity since the
+memory cgroup was created, and can be reset to 0 by writing 0:
+
+  # echo 0 > ../memory.vmscan_stat
+
+This file contains the following statistics.
+
+[param]_[file_or_anon]_pages_by_[reason]_[under_hierarchy]
+elapsed_ns_by_[reason]_[under_hierarchy]
+
+For example,
+
+	scanned_file_pages_by_limit indicates the number of file pages
+	scanned by vmscan because the cgroup hit its limit.
+
+Currently, 3 parameters are supported:
+
+	scanned - the number of pages scanned by vmscan
+	rotated - the number of pages activated by vmscan
+	freed   - the number of pages freed by vmscan
+
+If "rotated" is high against scanned/freed, the memcg seems busy.
+
+Currently, 2 reasons are supported:
+
+	limit - the memory cgroup's limit
+	system - global memory pressure + softlimit
+	(global memory pressure outside softlimit reclaim is not handled yet)
+
+When under_hierarchy is appended to the name, the number covers the
+total scanning of the memcg itself and all of its children.
+
+elapsed_ns is the elapsed time in nanoseconds. It may include sleep
+time and does not indicate CPU usage, so treat it as a measure of
+latency only.
+
+Here is an example.
+
+# cat /cgroup/memory/A/memory.vmscan_stat
+scanned_pages_by_limit 9471864
+scanned_anon_pages_by_limit 6640629
+scanned_file_pages_by_limit 2831235
+rotated_pages_by_limit 4243974
+rotated_anon_pages_by_limit 3971968
+rotated_file_pages_by_limit 272006
+freed_pages_by_limit 2318492
+freed_anon_pages_by_limit 962052
+freed_file_pages_by_limit 1356440
+elapsed_ns_by_limit 351386416101
+scanned_pages_by_system 0
+scanned_anon_pages_by_system 0
+scanned_file_pages_by_system 0
+rotated_pages_by_system 0
+rotated_anon_pages_by_system 0
+rotated_file_pages_by_system 0
+freed_pages_by_system 0
+freed_anon_pages_by_system 0
+freed_file_pages_by_system 0
+elapsed_ns_by_system 0
+scanned_pages_by_limit_under_hierarchy 9471864
+scanned_anon_pages_by_limit_under_hierarchy 6640629
+scanned_file_pages_by_limit_under_hierarchy 2831235
+rotated_pages_by_limit_under_hierarchy 4243974
+rotated_anon_pages_by_limit_under_hierarchy 3971968
+rotated_file_pages_by_limit_under_hierarchy 272006
+freed_pages_by_limit_under_hierarchy 2318492
+freed_anon_pages_by_limit_under_hierarchy 962052
+freed_file_pages_by_limit_under_hierarchy 1356440
+elapsed_ns_by_limit_under_hierarchy 351386416101
+scanned_pages_by_system_under_hierarchy 0
+scanned_anon_pages_by_system_under_hierarchy 0
+scanned_file_pages_by_system_under_hierarchy 0
+rotated_pages_by_system_under_hierarchy 0
+rotated_anon_pages_by_system_under_hierarchy 0
+rotated_file_pages_by_system_under_hierarchy 0
+freed_pages_by_system_under_hierarchy 0
+freed_anon_pages_by_system_under_hierarchy 0
+freed_file_pages_by_system_under_hierarchy 0
+elapsed_ns_by_system_under_hierarchy 0
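+
+In the example above, the rotate ratio for limit-triggered reclaim is
+rotated_pages_by_limit / scanned_pages_by_limit = 4243974 / 9471864,
+i.e. roughly 45%, which suggests reclaim in this memcg is fairly busy.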
+
5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
----------------------------
-What: DMA_xxBIT_MASK macros
-When: Jun 2011
-Why: DMA_xxBIT_MASK macros were replaced with DMA_BIT_MASK() macros.
-Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
-
-----------------------------
-
What: iwlwifi disable_hw_scan module parameters
When: 3.0
Why: Hardware scan is the preferred method for iwlwifi devices for
See Documentation/sound/oss/oss-parameters.txt
panic= [KNL] Kernel behaviour on panic: delay <timeout>
- seconds before rebooting
+ timeout > 0: seconds before rebooting
+ timeout = 0: wait forever
+ timeout < 0: reboot immediately
Format: <timeout>
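			For example, panic=30 reboots the machine 30
			seconds after a kernel panic, while panic=-1
			reboots it immediately.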
parkbd.port= [HW] Parallel port number the keyboard adapter is
- rtsig-nr
- sem
- sg-big-buff [ generic SCSI device (sg) ]
+- shm_rmid_forced
- shmall
- shmmax [ sysv ipc ]
- shmmni
==============================================================
+shm_rmid_forced:
+
+Linux lets you set resource limits, including how much memory one
+process can consume, via setrlimit(2). Unfortunately, shared memory
+segments are allowed to exist without association with any process, and
+thus might not be counted against any resource limits. If enabled,
+shared memory segments are automatically destroyed when their attach
+count becomes zero after a detach or a process termination. It will
+also destroy, when the creating process exits, segments that were
+created but never attached to. The only use left for IPC_RMID is to
+destroy an unattached segment. Of course, this breaks the way things are
+defined, so some applications might stop working. Note that this
+feature will do you no good unless you also configure your resource
+limits (in particular, RLIMIT_AS and RLIMIT_NPROC). Most systems don't
+need this.
+
+Note that if you change this from 0 to 1, existing segments that have
+no attached users and whose creating process has died will be
+destroyed.
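+
+A minimal illustration (a sketch using the standard SysV shm calls,
+not code from this patch): with shm_rmid_forced enabled, the segment
+below is destroyed as soon as its attach count drops back to zero,
+even though IPC_RMID was never requested:
+
+	#include <sys/ipc.h>
+	#include <sys/shm.h>
+
+	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
+	void *p = shmat(shmid, NULL, 0);  /* attach count: 1 */
+	shmdt(p);  /* attach count: 0 -> segment destroyed */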
+
+==============================================================
+
softlockup_thresh:
This value can be used to lower the softlockup tolerance threshold. The
MIPS
M: Ralf Baechle <ralf@linux-mips.org>
-W: http://www.linux-mips.org/
L: linux-mips@linux-mips.org
+W: http://www.linux-mips.org/
T: git git://git.linux-mips.org/pub/scm/linux.git
+Q: http://patchwork.linux-mips.org/project/linux-mips/list/
S: Supported
F: Documentation/mips/
F: arch/mips/
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/**
* atomic64_add_unless - add unless the number is a given value
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
-#include <asm-generic/atomic-long.h>
#endif /* _ALPHA_ATOMIC_H */
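
For context, a minimal sketch (not verbatim kernel source) of how the
old boolean helpers can be rebuilt in common code on top of the new
__atomic_add_unless() primitive, assuming the shared wrappers live in
<linux/atomic.h>:

	static inline int atomic_add_unless(atomic_t *v, int a, int u)
	{
		return __atomic_add_unless(v, a, u) != u;
	}
	#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

This is why the per-architecture atomic_inc_not_zero() definitions are
dropped here and in the other architectures below.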
#include <asm-generic/bitops/le.h>
-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#define _ALPHA_LOCAL_H
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
typedef struct
{
#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
#include <linux/init.h>
#include <asm/hwrpb.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/hwrpb.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
*/
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
asm (".text \n\
.global _atomic_dec_and_lock \n\
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
c = old;
- return c != u;
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-#else /* !CONFIG_GENERIC_ATOMIC64 */
-#include <asm-generic/atomic64.h>
-#endif
-#include <asm-generic/atomic-long.h>
+#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
/*
* Ext2 is defined to use little-endian byte ordering.
*/
-#define ext2_set_bit_atomic(lock, nr, p) \
- test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p) \
- test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#include <linux/clockchips.h>
#include <linux/completion.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <linux/mm.h>
#include <linux/pfn.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <mach/dma.h>
/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
#ifndef __CNS3XXX_PM_H
#define __CNS3XXX_PM_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
void cns3xxx_pwr_clk_en(unsigned int block);
void cns3xxx_pwr_clk_dis(unsigned int block);
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <mach/system.h>
#include <mach/cns3xxx.h>
#include <mach/pm.h>
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <mach/regs-clock.h>
#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <mach/regs-clock.h>
#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <mach/regs-clock.h>
/*
* atomic_sub_unless - sub unless the number is a given value
* @v: pointer of type atomic_t
- * @a: the amount to add to v...
+ * @a: the amount to subtract from v...
* @u: ...unless v is equal to u.
*
- * If the atomic value v is not equal to u, this function subtracts a
- * from v, and returns non zero. If v is equal to u then it returns
- * zero. This is done as an atomic operation.
+ * Atomically subtracts @a from @v, so long as @v was not @u.
+ * Returns nothing.
*/
-static inline int atomic_sub_unless(atomic_t *v, int a, int u)
+static inline void atomic_sub_unless(atomic_t *v, int a, int u)
{
- int tmp, result = 0;
+ int tmp;
asm volatile(
"/* atomic_sub_unless */\n"
"1: ssrf 5\n"
- " ld.w %0, %3\n"
- " cp.w %0, %5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
" breq 1f\n"
- " sub %0, %4\n"
- " stcond %2, %0\n"
+ " sub %0, %3\n"
+ " stcond %1, %0\n"
" brne 1b\n"
- " mov %1, 1\n"
"1:"
- : "=&r"(tmp), "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "rKs21"(a), "rKs21"(u)
: "cc", "memory");
-
- return result;
}
/*
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * If the atomic value v is not equal to u, this function adds a to v,
- * and returns non zero. If v is equal to u then it returns zero. This
- * is done as an atomic operation.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int tmp, result;
+ int tmp, old = atomic_read(v);
if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
- result = atomic_sub_unless(v, -a, u);
+ atomic_sub_unless(v, -a, u);
else {
- result = 0;
asm volatile(
- "/* atomic_add_unless */\n"
+ "/* __atomic_add_unless */\n"
"1: ssrf 5\n"
- " ld.w %0, %3\n"
- " cp.w %0, %5\n"
+ " ld.w %0, %2\n"
+ " cp.w %0, %4\n"
" breq 1f\n"
- " add %0, %4\n"
- " stcond %2, %0\n"
+ " add %0, %3\n"
+ " stcond %1, %0\n"
" brne 1b\n"
- " mov %1, 1\n"
"1:"
- : "=&r"(tmp), "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "r"(a), "ir"(u), "1"(result)
+ : "=&r"(tmp), "=o"(v->counter)
+ : "m"(v->counter), "r"(a), "ir"(u)
: "cc", "memory");
}
- return result;
+ return old;
}
/*
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
-#define atomic_inc_not_zero(v) atomic_add_unless(v, 1, 0)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
-
#endif /* __ASM_AVR32_ATOMIC_H */
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs (struct pt_regs *);
-
static __inline__ int valid_user_regs(struct pt_regs *regs)
{
/*
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_add_unless(v, a, u) \
+#define __atomic_add_unless(v, a, u) \
({ \
int c, old; \
c = atomic_read(v); \
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
c = old; \
- c != (u); \
+ c; \
})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/*
* atomic_inc_and_test - increment and test
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#include <asm-generic/atomic-long.h>
#endif
-#include <asm-generic/atomic64.h>
-
#endif
#include <linux/interrupt.h>
#include <mach/dma.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/blackfin.h>
#include <asm/page.h>
#include <asm-generic/dma.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/bitops.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/bitsperlong.h>
/* user_mode returns true if only one bit is set in IPEND, other than the
master interrupt enable. */
#define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
-extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
/* common code demands this function */
# include <asm-generic/spinlock.h>
#else
-#include <asm/atomic.h>
+#include <linux/atomic.h>
asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
#include <linux/unistd.h>
#include <linux/io.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq_handler.h>
DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
#include <linux/smp.h>
#include <linux/timer.h>
#include <asm/blackfin.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/bfin_watchdog.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/irq_handler.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#define user_mode(regs) (((regs)->dccr & 0x100) != 0)
#define instruction_pointer(regs) ((regs)->irp)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
#endif /* __KERNEL__ */
#define arch_has_single_step() (1)
#define user_mode(regs) (((regs)->ccs & (1 << (U_CCS_BITNR + CCS_SHIFT))) != 0)
#define instruction_pointer(regs) ((regs)->erp)
-extern void show_regs(struct pt_regs *);
#define profile_pc(regs) instruction_pointer(regs)
#endif /* __KERNEL__ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
if (ret != u)
v->counter += a;
cris_atomic_restore(v, flags);
- return ret != u;
+ return ret;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
#endif
#include <arch/bitops.h>
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/compiler.h>
/*
#include <asm-generic/bitops/le.h>
-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/sched.h>
* This file handles the architecture-dependent parts of process handling..
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
#define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#include <asm-generic/atomic-long.h>
#endif /* _ASM_ATOMIC_H */
#include <asm-generic/bitops/le.h>
-#define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr))
-#define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr))
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#endif /* __KERNEL__ */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
extern atomic_t irq_err_count;
static inline void ack_bad_irq(int irq)
*/
#define start_thread(_regs, _pc, _usp) \
do { \
- set_fs(USER_DS); /* reads from user space */ \
__frame = __kernel_frame0_ptr; \
__frame->pc = (_pc); \
__frame->psr &= ~PSR_S; \
#define user_stack_pointer(regs) ((regs)->sp)
extern unsigned long user_stack(const struct pt_regs *);
-extern void show_regs(struct pt_regs *);
#define profile_pc(regs) ((regs)->pc)
#define task_pt_regs(task) ((task)->thread.frame0)
#include <linux/module.h>
#include <linux/bitops.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
void flush_thread(void)
{
-#if 0 //ndef NO_FPU
- unsigned long zero = 0;
-#endif
- set_fs(USER_DS);
+ /* nothing */
}
inline unsigned long user_stack(const struct pt_regs *regs)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
if (ret != u)
v->counter += a;
local_irq_restore(flags);
- return ret != u;
+ return ret;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
#endif /* __ARCH_H8300_ATOMIC __ */
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#undef __FD_ISSET
-#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
+#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
#define user_mode(regs) (!((regs)->ccr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _H8300_PTRACE_H */
(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
#endif /* _ASM_IA64_ATOMIC_H */
#include <asm-generic/bitops/le.h>
-#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
-#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/sched.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
- extern void show_regs (struct pt_regs *);
extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
unsigned long *);
#include <linux/kernel.h>
#include <linux/bitops.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
{
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
#endif /* _ASM_M32R_ATOMIC_H */
#ifndef __ASSEMBLY__
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#define instruction_pointer(regs) ((regs)->bpc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
-
extern void withdraw_debug_trap(struct pt_regs *regs);
#define task_pt_regs(task) \
*/
#include <linux/compiler.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/page.h>
/*
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/smp.h>
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
-#include <asm-generic/atomic64.h>
#endif /* __ARCH_M68K_ATOMIC __ */
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#undef __FD_ISSET
-#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
+#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
#define user_mode(regs) (!((regs)->sr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
#ifndef _ASM_MICROBLAZE_MMU_CONTEXT_H
#define _ASM_MICROBLAZE_MMU_CONTEXT_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm-generic/mm_hooks.h>
#include <linux/types.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define HAVE_ARCH_DEVTREE_FIXUPS
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-void show_regs(struct pt_regs *);
-
#else /* __KERNEL__ */
/* pt_regs offsets used by gdbserver etc in ptrace syscalls */
platforms += loongson
platforms += mipssim
platforms += mti-malta
+platforms += netlogic
platforms += pmc-sierra
platforms += pnx833x
platforms += pnx8550
#
include $(srctree)/arch/mips/Kbuild.platforms
-#
-# NETLOGIC SOC Common (common)
-#
-cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic
-cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic
-
-#
-# NETLOGIC XLR/XLS SoC, Simulator and boards
-#
-core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/
-load-$(CONFIG_NLM_XLR_BOARD) += 0xffffffff84000000
-
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
return &vbus_clk;
if (!strcmp(id, "cpu"))
return &cpu_clk;
- if (!strcmp(id, "dsp"));
+ if (!strcmp(id, "dsp"))
return &dsp_clk;
if (!strcmp(id, "vbus"))
return &vbus_clk;
.name = "irq",
.flags = IORESOURCE_IRQ,
.start = 27,
- .end = 27,
+ .end = 27,
},
};
u16 csum;
u8 len;
char data[11];
-} __attribute__ ((packed));
+} __packed;
struct psp_var_map_entry {
u8 num;
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
*/
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
-#else /* !CONFIG_64BIT */
-
-#include <asm-generic/atomic64.h>
-
#endif /* CONFIG_64BIT */
/*
#define smp_mb__before_atomic_inc() smp_mb__before_llsc()
#define smp_mb__after_atomic_inc() smp_llsc_mb()
-#include <asm-generic/atomic-long.h>
-
#endif /* _ASM_ATOMIC_H */
#define _ASM_FIXMAP_H
#include <asm/page.h>
+#include <spaces.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
-#ifdef CONFIG_BCM63XX
-#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000)
-#else
-#if defined(CONFIG_CPU_TX39XX) || defined(CONFIG_CPU_TX49XX)
-#define FIXADDR_TOP ((unsigned long)(long)(int)(0xff000000 - 0x20000))
-#else
-#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
-#endif
-#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#ifndef _ASM_GT64120_H
#define _ASM_GT64120_H
-#include <linux/clocksource.h>
-
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#ifndef __ASM_HW_IRQ_H
#define __ASM_HW_IRQ_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
extern atomic_t irq_err_count;
static inline void irq_dispose_mapping(unsigned int virq)
{
- return;
}
#ifdef CONFIG_I8259
#include <linux/percpu.h>
#include <linux/bitops.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>
/* Interrupt Mask register */
#define PERF_IRQMASK_REG 0xc
-#define PERF_IRQSTAT_REG 0x10
/* Interrupt Status register */
#define PERF_IRQSTAT_REG 0x10
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_BCM63XX_SPACES_H
+#define _ASM_BCM63XX_SPACES_H
+
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xff000000)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_BCM63XX_SPACES_H */
static inline void plat_extra_sync_for_device(struct device *dev)
{
- return;
}
static inline int plat_dma_mapping_error(struct device *dev,
#define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET)
#endif
+#ifndef FIXADDR_TOP
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
+#endif
+
#endif /* __ASM_MACH_GENERIC_SPACES_H */
static inline void plat_extra_sync_for_device(struct device *dev)
{
- return;
}
static inline int plat_dma_mapping_error(struct device *dev,
static inline void plat_extra_sync_for_device(struct device *dev)
{
- return;
}
static inline int plat_dma_mapping_error(struct device *dev,
static inline void plat_extra_sync_for_device(struct device *dev)
{
- return;
}
static inline int plat_dma_mapping_error(struct device *dev,
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
+#define cpu_has_clo_clz 1
#define cpu_has_nofpuex 0
/* #define cpu_has_64bits ? */
/* #define cpu_has_64bit_zero_reg ? */
--- /dev/null
+/*
+ * Copyright (C) 2010 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
+#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
+#define cpu_has_tlb 1
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_mcheck 1
+#define cpu_has_ejtag 1
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 1
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_nofpuex 0
+#define cpu_has_64bits 0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_vint 1
+#define cpu_has_veic 1
+#define cpu_has_inclusive_pcaches 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+#endif /* _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_ */
static inline void plat_extra_sync_for_device(struct device *dev)
{
- return;
}
static inline int plat_dma_mapping_error(struct device *dev,
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_TX39XX_SPACES_H
+#define _ASM_TX39XX_SPACES_H
+
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_TX39XX_SPACES_H */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_TX49XX_SPACES_H
+#define _ASM_TX49XX_SPACES_H
+
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfefe0000)
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_TX49XX_SPACES_H */
* constraints placed on us by the cache architecture.
*/
#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
* No page table caches to initialise
#ifndef __ASM_SMP_OPS_H
#define __ASM_SMP_OPS_H
+#include <linux/errno.h>
+
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
#endif /* !CONFIG_SMP */
-extern struct plat_smp_ops up_smp_ops;
-extern struct plat_smp_ops cmp_smp_ops;
-extern struct plat_smp_ops vsmp_smp_ops;
+static inline int register_up_smp_ops(void)
+{
+#ifdef CONFIG_SMP_UP
+ extern struct plat_smp_ops up_smp_ops;
+
+ register_smp_ops(&up_smp_ops);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static inline int register_cmp_smp_ops(void)
+{
+#ifdef CONFIG_MIPS_CMP
+ extern struct plat_smp_ops cmp_smp_ops;
+
+ register_smp_ops(&cmp_smp_ops);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+
+static inline int register_vsmp_smp_ops(void)
+{
+#ifdef CONFIG_MIPS_MT_SMP
+ extern struct plat_smp_ops vsmp_smp_ops;
+
+ register_smp_ops(&vsmp_smp_ops);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
#endif /* __ASM_SMP_OPS_H */
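
A sketch of how platform setup code can use these helpers, trying the
strongest available SMP ops first (the function name here is made up;
the sim hunk later in this series follows the same pattern):

	static void __init sketch_register_smp_ops(void)
	{
		if (!register_cmp_smp_ops())
			return;
		if (!register_vsmp_smp_ops())
			return;
		register_up_smp_ops();	/* UP fallback, if configured */
	}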
#include <linux/threads.h>
#include <linux/cpumask.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/smp-ops.h>
extern int smp_num_siblings;
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
+extern void smtc_init_secondary(void);
/*
# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
+# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
+# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
#define __NR_open_by_handle_at (__NR_Linux + 340)
#define __NR_clock_adjtime (__NR_Linux + 341)
#define __NR_syncfs (__NR_Linux + 342)
-#define __NR_setns (__NR_Linux + 343)
+#define __NR_sendmmsg (__NR_Linux + 343)
+#define __NR_setns (__NR_Linux + 344)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 343
+#define __NR_Linux_syscalls 344
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 343
+#define __NR_O32_Linux_syscalls 344
#if _MIPS_SIM == _MIPS_SIM_ABI64
#define __NR_open_by_handle_at (__NR_Linux + 299)
#define __NR_clock_adjtime (__NR_Linux + 300)
#define __NR_syncfs (__NR_Linux + 301)
-#define __NR_setns (__NR_Linux + 302)
+#define __NR_sendmmsg (__NR_Linux + 302)
+#define __NR_setns (__NR_Linux + 303)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 302
+#define __NR_Linux_syscalls 303
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 302
+#define __NR_64_Linux_syscalls 303
#if _MIPS_SIM == _MIPS_SIM_NABI32
#define __NR_open_by_handle_at (__NR_Linux + 304)
#define __NR_clock_adjtime (__NR_Linux + 305)
#define __NR_syncfs (__NR_Linux + 306)
-#define __NR_setns (__NR_Linux + 307)
+#define __NR_sendmmsg (__NR_Linux + 307)
+#define __NR_setns (__NR_Linux + 308)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 307
+#define __NR_Linux_syscalls 308
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 307
+#define __NR_N32_Linux_syscalls 308
#ifdef __KERNEL__
local_irq_enable();
__asm__(" .globl __pastwait \n"
"__pastwait: \n");
- return;
}
/*
#include <linux/kgdb.h>
#include <linux/ftrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
- /*
- * Only MT is using the software interrupts currently, so we just
- * leave them uninitialized for other processors.
- */
- if (cpu_has_mipsmt)
- for (i = irq_base; i < irq_base + 2; i++)
- irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller,
- handle_percpu_irq);
+ /* Software interrupts are used for MT/CMT IPI */
+ for (i = irq_base; i < irq_base + 2; i++)
+ irq_set_chip_and_handler(i, cpu_has_mipsmt ?
+ &mips_mt_cpu_irq_controller :
+ &mips_cpu_irq_controller,
+ handle_percpu_irq);
for (i = irq_base + 2; i < irq_base + 8; i++)
irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
-
- return;
}
static void mipspmu_start(struct perf_event *event, int flags)
__init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
- current_thread_info()->addr_limit = USER_DS;
}
void exit_thread(void)
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/cacheflush.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
sys sys_open_by_handle_at 3 /* 4340 */
sys sys_clock_adjtime 2
sys sys_syncfs 1
+ sys sys_sendmmsg 4
sys sys_setns 2
.endm
PTR sys_open_by_handle_at
PTR sys_clock_adjtime /* 5300 */
PTR sys_syncfs
+ PTR sys_sendmmsg
PTR sys_setns
.size sys_call_table,.-sys_call_table
PTR sys_open_by_handle_at
PTR compat_sys_clock_adjtime /* 6305 */
PTR sys_syncfs
+ PTR compat_sys_sendmmsg
PTR sys_setns
.size sysn32_call_table,.-sysn32_call_table
PTR compat_sys_open_by_handle_at /* 4340 */
PTR compat_sys_clock_adjtime
PTR sys_syncfs
+ PTR compat_sys_sendmmsg
PTR sys_setns
.size sys_call_table,.-sys_call_table
#include <linux/interrupt.h>
#include <linux/compiler.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <linux/smp.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/err.h>
#include <linux/ftrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <linux/cpumask.h>
#include <asm/r4k-timer.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
}
EXPORT_SYMBOL(clk_put);
+int clk_enable(struct clk *clk)
+{
+ /* not used */
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ /* not used */
+}
+EXPORT_SYMBOL(clk_disable);
+
static inline u32 ltq_get_counter_resolution(void)
{
u32 res;
/* flush the write action */
inb(EC_IO_PORT_DATA);
spin_unlock_irqrestore(&index_access_lock, flags);
-
- return;
}
EXPORT_SYMBOL_GPL(ec_write);
#include <asm/time.h>
#include <asm/mips-boards/sim.h>
#include <asm/mips-boards/simint.h>
+#include <asm/smp-ops.h>
static void __init serial_init(void);
prom_meminit();
-#ifdef CONFIG_MIPS_MT_SMP
- if (cpu_has_mipsmt)
- register_smp_ops(&vsmp_smp_ops);
- else
- register_smp_ops(&up_smp_ops);
-#endif
+ if (cpu_has_mipsmt) {
+ if (!register_vsmp_smp_ops())
+ return;
+
#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_has_mipsmt)
register_smp_ops(&ssmtc_smp_ops);
- else
- register_smp_ops(&up_smp_ops);
+ return;
#endif
+ }
+
+ register_up_smp_ops();
}
static void __init serial_init(void)
#include <linux/interrupt.h>
#include <linux/smp.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/smtc.h>
r4k_blast_scache();
else
blast_scache_range(addr, addr + size);
+ __sync();
return;
}
}
bc_wback_inv(addr, size);
+ __sync();
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
(addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}
+ __sync();
return;
}
}
bc_inv(addr, size);
+ __sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
+#include <linux/highmem.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <dma-coherence.h>
-static inline unsigned long dma_addr_to_virt(struct device *dev,
+static inline struct page *dma_addr_to_page(struct device *dev,
dma_addr_t dma_addr)
{
- unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
-
- return (unsigned long)phys_to_virt(addr);
+ return pfn_to_page(
+ plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
/*
free_pages(addr, get_order(size));
}
-static inline void __dma_sync(unsigned long addr, size_t size,
+static inline void __dma_sync_virtual(void *addr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
- dma_cache_wback(addr, size);
+ dma_cache_wback((unsigned long)addr, size);
break;
case DMA_FROM_DEVICE:
- dma_cache_inv(addr, size);
+ dma_cache_inv((unsigned long)addr, size);
break;
case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv(addr, size);
+ dma_cache_wback_inv((unsigned long)addr, size);
break;
default:
}
}
+/*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+static inline void __dma_sync(struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ size_t left = size;
+
+ do {
+ size_t len = left;
+
+ if (PageHighMem(page)) {
+ void *addr;
+
+ if (offset + len > PAGE_SIZE) {
+ if (offset >= PAGE_SIZE) {
+ page += offset >> PAGE_SHIFT;
+ offset &= ~PAGE_MASK;
+ }
+ len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ __dma_sync_virtual(addr + offset, len, direction);
+ kunmap_atomic(addr);
+ } else
+ __dma_sync_virtual(page_address(page) + offset,
+ size, direction);
+ offset = 0;
+ page++;
+ left -= len;
+ } while (left);
+}
+
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
if (cpu_is_noncoherent_r10000(dev))
- __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
- direction);
+ __dma_sync(dma_addr_to_page(dev, dma_addr),
+ dma_addr & ~PAGE_MASK, size, direction);
plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
int i;
for (i = 0; i < nents; i++, sg++) {
- unsigned long addr;
-
- addr = (unsigned long) sg_virt(sg);
- if (!plat_device_is_coherent(dev) && addr)
- __dma_sync(addr, sg->length, direction);
- sg->dma_address = plat_map_dma_mem(dev,
- (void *)addr, sg->length);
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(sg_page(sg), sg->offset, sg->length,
+ direction);
+ sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
+ sg->offset;
}
return nents;
unsigned long offset, size_t size, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- unsigned long addr;
-
- addr = (unsigned long) page_address(page) + offset;
-
if (!plat_device_is_coherent(dev))
- __dma_sync(addr, size, direction);
+ __dma_sync(page, offset, size, direction);
- return plat_map_dma_mem(dev, (void *)addr, size);
+ return plat_map_dma_mem_page(dev, page) + offset;
}
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
- unsigned long addr;
int i;
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
- direction != DMA_TO_DEVICE) {
- addr = (unsigned long) sg_virt(sg);
- if (addr)
- __dma_sync(addr, sg->length, direction);
- }
+ direction != DMA_TO_DEVICE)
+ __dma_sync(sg_page(sg), sg->offset, sg->length,
+ direction);
plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
}
}
static void mips_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- if (cpu_is_noncoherent_r10000(dev)) {
- unsigned long addr;
-
- addr = dma_addr_to_virt(dev, dma_handle);
- __dma_sync(addr, size, direction);
- }
+ if (cpu_is_noncoherent_r10000(dev))
+ __dma_sync(dma_addr_to_page(dev, dma_handle),
+ dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
plat_extra_sync_for_device(dev);
- if (!plat_device_is_coherent(dev)) {
- unsigned long addr;
-
- addr = dma_addr_to_virt(dev, dma_handle);
- __dma_sync(addr, size, direction);
- }
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, dma_handle),
+ dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_sg_for_cpu(struct device *dev,
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (cpu_is_noncoherent_r10000(dev))
- __dma_sync((unsigned long)page_address(sg_page(sg)),
- sg->length, direction);
+ __dma_sync(sg_page(sg), sg->offset, sg->length,
+ direction);
}
}
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
- __dma_sync((unsigned long)page_address(sg_page(sg)),
- sg->length, direction);
+ __dma_sync(sg_page(sg), sg->offset, sg->length,
+ direction);
}
}
plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
- __dma_sync((unsigned long)vaddr, size, direction);
+ __dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
k = __pmd_offset(vaddr);
pgd = pgd_base + i;
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
pud = (pud_t *)pgd;
- for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
pmd = (pmd_t *)pud;
- for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
- max_mapnr = highend_pfn;
+ max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
#endif
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
+#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>
EXPORT_SYMBOL(shm_align_mask);
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+
+ if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+ return 1;
+
+ return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+ unsigned long gap = rlimit(RLIMIT_STACK);
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+ unsigned long pgoff)
+{
+ unsigned long base = addr & ~shm_align_mask;
+ unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+ if (base + off <= addr)
+ return base + off;
+
+ return base - off;
+}
+
#define COLOUR_ALIGN(addr,pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags, enum mmap_allocation_direction dir)
{
- struct vm_area_struct * vmm;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
int do_color_align;
- if (len > TASK_SIZE)
+ if (unlikely(len > TASK_SIZE))
return -ENOMEM;
if (flags & MAP_FIXED) {
- /* Even MAP_FIXED mappings must reside within TASK_SIZE. */
+ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
+
+ /* requesting a specific address */
if (addr) {
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
- vmm = find_vma(current->mm, addr);
+
+ vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vmm || addr + len <= vmm->vm_start))
+ (!vma || addr + len <= vma->vm_start))
return addr;
}
- addr = current->mm->mmap_base;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
- return addr;
- addr = vmm->vm_end;
+ if (dir == UP) {
+ addr = mm->mmap_base;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = vma->vm_end;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ }
+ } else {
+ /* check if free_area_cache is useful for us */
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = mm->mmap_base;
+ }
+
+ /* either no address requested or can't fit in requested address hole */
+ addr = mm->free_area_cache;
+ if (do_color_align) {
+ unsigned long base =
+ COLOUR_ALIGN_DOWN(addr - len, pgoff);
+
+ addr = base + len;
+ }
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr - len);
+ if (!vma || addr <= vma->vm_start) {
+ /* remember the address as a hint for next time */
+				return mm->free_area_cache = addr - len;
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+		addr = mm->mmap_base - len;
if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+			if (likely(!vma || addr + len <= vma->vm_start)) {
+ /* remember the address as a hint for next time */
+ return mm->free_area_cache = addr;
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+			addr = vma->vm_start - len;
+ if (do_color_align)
+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ } while (likely(len < vma->vm_start));
+
+bottomup:
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->cached_hole_size = ~0UL;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
}
}
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return arch_get_unmapped_area_foo(filp,
+ addr0, len, pgoff, flags, DOWN);
+}
+
void arch_pick_mmap_layout(struct mm_struct *mm)
{
unsigned long random_factor = 0UL;
random_factor &= 0xffffffful;
}
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
- mm->get_unmapped_area = arch_get_unmapped_area;
- mm->unmap_area = arch_unmap_area;
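+	/*
+	 * Legacy layout grows the mmap area up from TASK_UNMAPPED_BASE;
+	 * the default layout starts below the stack at mmap_base and
+	 * grows down.
+	 */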
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
}
static inline unsigned long brk_rnd(void)
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, 0, pgd_base);
+ fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, 0, pgd_base);
+ fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
}
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);
+struct work_registers {
+ int r1;
+ int r2;
+ int r3;
+};
+
+struct tlb_reg_save {
+ unsigned long a;
+ unsigned long b;
+} ____cacheline_aligned_in_smp;
+
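+/*
+ * One save slot per CPU; the cacheline alignment keeps TLB refills
+ * on different CPUs from false-sharing a line.
+ */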
+static struct tlb_reg_save handler_reg_save[NR_CPUS];
static inline int r45k_bvahwbug(void)
{
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
+static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+{
+ struct work_registers r;
+
+ int smp_processor_id_reg;
+ int smp_processor_id_sel;
+ int smp_processor_id_shift;
+
+ if (scratch_reg > 0) {
+ /* Save in CPU local C0_KScratch? */
+ UASM_i_MTC0(p, 1, 31, scratch_reg);
+ r.r1 = K0;
+ r.r2 = K1;
+ r.r3 = 1;
+ return r;
+ }
+
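+	/*
+	 * No usable KScratch register: spill $1/$2 to a per-CPU slot
+	 * in handler_reg_save, indexed by CPU number on SMP.
+	 */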
+ if (num_possible_cpus() > 1) {
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+ smp_processor_id_shift = 51;
+ smp_processor_id_reg = 20; /* XContext */
+ smp_processor_id_sel = 0;
+#else
+# ifdef CONFIG_32BIT
+ smp_processor_id_shift = 25;
+ smp_processor_id_reg = 4; /* Context */
+ smp_processor_id_sel = 0;
+# endif
+# ifdef CONFIG_64BIT
+ smp_processor_id_shift = 26;
+ smp_processor_id_reg = 4; /* Context */
+ smp_processor_id_sel = 0;
+# endif
+#endif
+ /* Get smp_processor_id */
+ UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
+ UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+
+ /* handler_reg_save index in K0 */
+ UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+
+ UASM_i_LA(p, K1, (long)&handler_reg_save);
+ UASM_i_ADDU(p, K0, K0, K1);
+ } else {
+ UASM_i_LA(p, K0, (long)&handler_reg_save);
+ }
+ /* K0 now points to save area, save $1 and $2 */
+ UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+ UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+
+ r.r1 = K1;
+ r.r2 = 1;
+ r.r3 = 2;
+ return r;
+}
+
+static void __cpuinit build_restore_work_registers(u32 **p)
+{
+ if (scratch_reg > 0) {
+ UASM_i_MFC0(p, 1, 31, scratch_reg);
+ return;
+ }
+ /* K0 already points to save area, restore $1 and $2 */
+ UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+ UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+}
+
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
memset(relocs, 0, sizeof(relocs));
memset(final_handler, 0, sizeof(final_handler));
- if (scratch_reg == 0)
- scratch_reg = allocate_kscratch();
-
if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
scratch_reg);
*/
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
- unsigned int pte, unsigned int ptr, enum label_id lid)
+ int pte, int ptr, int scratch, enum label_id lid)
{
+ int t = scratch >= 0 ? scratch : pte;
+
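+	/*
+	 * With a scratch register the test clobbers the scratch
+	 * rather than the live PTE value, so no reload is needed at
+	 * the branch target.
+	 */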
if (kernel_uses_smartmips_rixi) {
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
uasm_i_nop(p);
} else {
- uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
- uasm_il_beqz(p, r, pte, lid);
- iPTE_LW(p, pte, ptr);
+ uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+ uasm_il_beqz(p, r, t, lid);
+ if (pte == t)
+				/* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
}
} else {
- uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
- uasm_il_bnez(p, r, pte, lid);
- iPTE_LW(p, pte, ptr);
+ uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
+ uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+ uasm_il_bnez(p, r, t, lid);
+ if (pte == t)
+			/* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
}
}
*/
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
- unsigned int pte, unsigned int ptr, enum label_id lid)
+ unsigned int pte, unsigned int ptr, int scratch,
+ enum label_id lid)
{
- if (use_bbit_insns()) {
- uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
- uasm_i_nop(p);
- uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
- uasm_i_nop(p);
- } else {
- uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
- uasm_il_bnez(p, r, pte, lid);
+ int t = scratch >= 0 ? scratch : pte;
+
+ uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+ uasm_il_bnez(p, r, t, lid);
+ if (pte == t)
+		/* You lose the SMP race :-( */
iPTE_LW(p, pte, ptr);
- }
+ else
+ uasm_i_nop(p);
}
/* Make PTE writable, update software status bits as well, then store
*/
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
- unsigned int pte, unsigned int ptr, enum label_id lid)
+ unsigned int pte, unsigned int ptr, int scratch,
+ enum label_id lid)
{
if (use_bbit_insns()) {
uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
uasm_i_nop(p);
} else {
- uasm_i_andi(p, pte, pte, _PAGE_WRITE);
- uasm_il_beqz(p, r, pte, lid);
- iPTE_LW(p, pte, ptr);
+ int t = scratch >= 0 ? scratch : pte;
+ uasm_i_andi(p, t, pte, _PAGE_WRITE);
+ uasm_il_beqz(p, r, t, lid);
+ if (pte == t)
+			/* You lose the SMP race :-( */
+ iPTE_LW(p, pte, ptr);
}
}
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+ build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
build_make_valid(&p, &r, K0, K1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+ build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
memset(handle_tlbm, 0, sizeof(handle_tlbm));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
- build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
/*
* R4000 style TLB load/store/modify handlers.
*/
-static void __cpuinit
+static struct work_registers __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
- struct uasm_reloc **r, unsigned int pte,
- unsigned int ptr)
+ struct uasm_reloc **r)
{
+ struct work_registers wr = build_get_work_registers(p);
+
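+	/*
+	 * From here on wr.r1 carries the pte, wr.r2 the page-table
+	 * pointer, and wr.r3 is a free temporary.
+	 */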
#ifdef CONFIG_64BIT
- build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
+ build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
- build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
+ build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif
#ifdef CONFIG_HUGETLB_PAGE
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
- build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+ build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif
- UASM_i_MFC0(p, pte, C0_BADVADDR);
- UASM_i_LW(p, ptr, 0, ptr);
- UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
- uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
- UASM_i_ADDU(p, ptr, ptr, pte);
+ UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
+ UASM_i_LW(p, wr.r2, 0, wr.r2);
+ UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+ uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+ UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
#ifdef CONFIG_SMP
uasm_l_smp_pgtable_change(l, *p);
#endif
- iPTE_LW(p, pte, ptr); /* get even pte */
+ iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
if (!m4kc_tlbp_war())
build_tlb_probe_entry(p);
+ return wr;
}
static void __cpuinit
build_update_entries(p, tmp, ptr);
build_tlb_write_entry(p, l, r, tlb_indexed);
uasm_l_leave(l, *p);
+ build_restore_work_registers(p);
uasm_i_eret(p); /* return from trap */
#ifdef CONFIG_64BIT
u32 *p = handle_tlbl;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
+ struct work_registers wr;
memset(handle_tlbl, 0, sizeof(handle_tlbl));
memset(labels, 0, sizeof(labels));
/* No need for uasm_i_nop */
}
- build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
- build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+ wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+ build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
* have triggered it. Skip the expensive test..
*/
if (use_bbit_insns()) {
- uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+ uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
label_tlbl_goaround1);
} else {
- uasm_i_andi(&p, K0, K0, _PAGE_VALID);
- uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+ uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+ uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
}
uasm_i_nop(&p);
uasm_i_tlbr(&p);
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
- uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+ uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
} else {
- uasm_i_andi(&p, K0, K1, sizeof(pte_t));
- uasm_i_beqz(&p, K0, 8);
+ uasm_i_andi(&p, wr.r3, wr.r2