* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
printk: fix wrong format string iter for printk
futex: comment requeue key reference semantics
* 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
irq: fix cpumask memory leak on offstack cpumask kernels
* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
posix-timers: fix RLIMIT_CPU && setitimer(CPUCLOCK_PROF)
posix-timers: fix RLIMIT_CPU && fork()
timers: add missing kernel-doc
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
+#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
+#include <linux/topology.h>
++#include <linux/wait.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace */
+#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
* @affinity: IRQ affinity on SMP
* @cpu: cpu index useful for balancing
* @pending_mask: pending rebalanced interrupts
++ * @threads_active: number of irqaction threads currently running
++ * @wait_for_threads: wait queue for synchronize_irq() to wait for threaded handlers
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
unsigned int irq;
-#ifdef CONFIG_SPARSE_IRQ
struct timer_rand_state *timer_rand_state;
unsigned int *kstat_irqs;
-# ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_INTR_REMAP
struct irq_2_iommu *irq_2_iommu;
-# endif
#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
unsigned int irqs_unhandled;
spinlock_t lock;
#ifdef CONFIG_SMP
- cpumask_t affinity;
+ cpumask_var_t affinity;
unsigned int cpu;
-#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
- cpumask_t pending_mask;
+ cpumask_var_t pending_mask;
+#endif
#endif
++ atomic_t threads_active;
++ wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
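A hedged sketch of how these two new fields cooperate (assumed shape; the
real users are synchronize_irq() and the IRQ-thread completion path):

	/* waiter, e.g. synchronize_irq(): sleep until all IRQ threads are done */
	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));

	/* IRQ thread, after finishing a threaded handler invocation */
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);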
extern struct irq_desc irq_desc[NR_IRQS];
#else /* CONFIG_SPARSE_IRQ */
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
-
-#define kstat_irqs_this_cpu(DESC) \
- ((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
- ((DESC)->kstat_irqs[smp_processor_id()]++)
-
#endif /* CONFIG_SPARSE_IRQ */
extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
* Migration helpers for obsolete names; they will go away:
*/
#define hw_interrupt_type irq_chip
-typedef struct irq_chip hw_irq_controller;
#define no_irq_type no_irq_chip
typedef struct irq_desc irq_desc_t;
#include <asm/hw_irq.h>
extern int setup_irq(unsigned int irq, struct irqaction *new);
+extern void remove_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_GENERIC_HARDIRQS
}
/* Handle irq action chains: */
-extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
+extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
/*
* Built-in IRQ handlers for various IRQ types,
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- int action_ret);
+ irqreturn_t action_ret);
/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
#endif /* !CONFIG_S390 */
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc: pointer to irq_desc struct
+ * @cpu: cpu which will be handling the cpumasks
+ * @boot: true if allocations must use bootmem
+ *
+ * Allocates the affinity and pending_mask cpumasks if required.
+ * Returns true on success, or when no allocation is required.
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+ bool boot)
+{
+ int node;
+
+ if (boot) {
+ alloc_bootmem_cpumask_var(&desc->affinity);
+ cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ alloc_bootmem_cpumask_var(&desc->pending_mask);
+ cpumask_clear(desc->pending_mask);
+#endif
+ return true;
+ }
+
+ node = cpu_to_node(cpu);
+
+ if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+ return false;
+ cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+ free_cpumask_var(desc->affinity);
+ return false;
+ }
+ cpumask_clear(desc->pending_mask);
+#endif
+ return true;
+}
+
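A minimal usage sketch (assumed caller shape; in the kernel the real call
sites are the irq_desc allocation paths such as irq_to_desc_alloc_cpu()):

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, cpu_to_node(cpu));
	if (desc && !init_alloc_desc_masks(desc, cpu, false)) {
		kfree(desc);	/* cpumask allocation failed */
		desc = NULL;
	}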
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc: pointer to old irq_desc struct
+ * @new_desc: pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * With !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct, so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
++ +static inline void free_desc_masks(struct irq_desc *old_desc,
++ + struct irq_desc *new_desc)
++ +{
++ + free_cpumask_var(old_desc->affinity);
++ +
++ +#ifdef CONFIG_GENERIC_PENDING_IRQ
++ + free_cpumask_var(old_desc->pending_mask);
++ +#endif
++ +}
++ +
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+ bool boot)
+{
+ return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+}
+
++ +static inline void free_desc_masks(struct irq_desc *old_desc,
++ + struct irq_desc *new_desc)
++ +{
++ +}
+#endif /* CONFIG_SMP */
+
#endif /* _LINUX_IRQ_H */
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
+#include <linux/fs_struct.h>
#include <trace/sched.h>
+#include <linux/magic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
{
struct task_struct *tsk;
struct thread_info *ti;
+ unsigned long *stackend;
+
int err;
prepare_to_copy(orig);
goto out;
setup_thread_stack(tsk, orig);
+ stackend = end_of_stack(tsk);
+ *stackend = STACK_END_MAGIC; /* for overflow detection */
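The magic value can later be checked wherever stack corruption is
suspected. A hedged sketch of the consumer side (the exact check site is
arch- and version-specific, typically an oops or fault path):

	if (*end_of_stack(tsk) != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");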
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_int();
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
rb_parent = NULL;
tsk->min_flt = tsk->maj_flt = 0;
tsk->nvcsw = tsk->nivcsw = 0;
++#ifdef CONFIG_DETECT_HUNG_TASK
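++	/* baseline snapshot for the hung-task detector's switch-count check */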
++ tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
++#endif
tsk->mm = NULL;
tsk->active_mm = NULL;
return retval;
}
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
- struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
- /* We don't need to lock fs - think why ;-) */
- if (fs) {
- atomic_set(&fs->count, 1);
- rwlock_init(&fs->lock);
- fs->umask = old->umask;
- read_lock(&old->lock);
- fs->root = old->root;
- path_get(&old->root);
- fs->pwd = old->pwd;
- path_get(&old->pwd);
- read_unlock(&old->lock);
- }
- return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
- return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
+ struct fs_struct *fs = current->fs;
if (clone_flags & CLONE_FS) {
- atomic_inc(&current->fs->count);
+ /* tsk->fs is already what we want */
+ write_lock(&fs->lock);
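+ /* an exec is in progress on this fs_struct; refuse to add new users */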
+ if (fs->in_exec) {
+ write_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+ fs->users++;
+ write_unlock(&fs->lock);
return 0;
}
- tsk->fs = __copy_fs_struct(current->fs);
+ tsk->fs = copy_fs_struct(fs);
if (!tsk->fs)
return -ENOMEM;
return 0;
sig->cputime_expires.virt_exp = cputime_zero;
sig->cputime_expires.sched_exp = 0;
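+++	/*
+++	 * If RLIMIT_CPU is finite, arm the group-wide profiling timer
+++	 * now so the limit is enforced from the start of the process.
+++	 */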
+++ if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+++ sig->cputime_expires.prof_exp =
+++ secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+++ sig->cputimer.running = 1;
+++ }
+++
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
atomic_inc(&current->signal->live);
return 0;
}
--- sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
---
--- if (sig)
--- posix_cpu_timers_init_group(sig);
+++ sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
if (!sig)
return -ENOMEM;
atomic_set(&sig->live, 1);
init_waitqueue_head(&sig->wait_chldexit);
sig->flags = 0;
+ if (clone_flags & CLONE_NEWPID)
+ sig->flags |= SIGNAL_UNKILLABLE;
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
task_unlock(current->group_leader);
+++ posix_cpu_timers_init_group(sig);
+++
acct_init_pacct(&sig->pacct);
tty_audit_fork(sig);
p->default_timer_slack_ns = current->timer_slack_ns;
--#ifdef CONFIG_DETECT_SOFTLOCKUP
-- p->last_switch_count = 0;
-- p->last_switch_timestamp = 0;
--#endif
--
task_io_accounting_init(&p->ioac);
acct_clear_integrals(p);
goto bad_fork_cleanup_mm;
if ((retval = copy_io(clone_flags, p)))
goto bad_fork_cleanup_namespaces;
- retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+ retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
if (retval)
goto bad_fork_cleanup_io;
p->signal->leader_pid = pid;
tty_kref_put(p->signal->tty);
p->signal->tty = tty_kref_get(current->signal->tty);
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
{
struct fs_struct *fs = current->fs;
- if ((unshare_flags & CLONE_FS) &&
- (fs && atomic_read(&fs->count) > 1)) {
- *new_fsp = __copy_fs_struct(current->fs);
- if (!*new_fsp)
- return -ENOMEM;
- }
+ if (!(unshare_flags & CLONE_FS) || !fs)
+ return 0;
+
+ /* don't need the lock here; in the worst case we'll do a useless copy */
+ if (fs->users == 1)
+ return 0;
+
+ *new_fsp = copy_fs_struct(fs);
+ if (!*new_fsp)
+ return -ENOMEM;
return 0;
}
if (new_fs) {
fs = current->fs;
+ write_lock(&fs->lock);
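+ /*
+ * Reuse new_fs as the "to be freed" pointer: NULL while the old
+ * fs_struct still has users, the old fs_struct itself once we
+ * were its last user.
+ */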
current->fs = new_fs;
- new_fs = fs;
+ if (--fs->users)
+ new_fs = NULL;
+ else
+ new_fs = fs;
+ write_unlock(&fs->lock);
}
if (new_mm) {
bad_unshare_cleanup_fs:
if (new_fs)
- put_fs_struct(new_fs);
+ free_fs_struct(new_fs);
bad_unshare_cleanup_thread:
bad_unshare_out:
debug_object_free(timer, &timer_debug_descr);
}
-static void __init_timer(struct timer_list *timer);
+static void __init_timer(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
-void init_timer_on_stack(struct timer_list *timer)
+void init_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
debug_object_init_on_stack(timer, &timer_debug_descr);
- __init_timer(timer);
+ __init_timer(timer, name, key);
}
-EXPORT_SYMBOL_GPL(init_timer_on_stack);
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
void destroy_timer_on_stack(struct timer_list *timer)
{
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
-static void __init_timer(struct timer_list *timer)
+static void __init_timer(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
timer->entry.next = NULL;
timer->base = __raw_get_cpu_var(tvec_bases);
timer->start_pid = -1;
memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
+ lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
--- * init_timer - initialize a timer.
+++ * init_timer_key - initialize a timer
* @timer: the timer to be initialized
+++ * @name: name of the timer
+++ * @key: lockdep class key of the fake lock used for tracking timer
+++ * sync lock dependencies
*
--- * init_timer() must be done to a timer prior calling *any* of the
+++ * init_timer_key() must be done to a timer prior to calling *any* of the
* other timer functions.
*/
-void init_timer(struct timer_list *timer)
+void init_timer_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
debug_timer_init(timer);
- __init_timer(timer);
+ __init_timer(timer, name, key);
}
-EXPORT_SYMBOL(init_timer);
+EXPORT_SYMBOL(init_timer_key);
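Callers continue to write init_timer(timer); a header-side wrapper macro
supplies the name and lockdep key. A sketch, assuming the usual lockdep
pattern of one static key per initialization site:

	#define init_timer(timer)					\
		do {							\
			static struct lock_class_key __key;		\
			init_timer_key((timer), #timer, &__key);	\
		} while (0)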
-void init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
- init_timer(timer);
+ init_timer_key(timer, name, key);
timer_set_deferrable(timer);
}
-EXPORT_SYMBOL(init_timer_deferrable);
+EXPORT_SYMBOL(init_timer_deferrable_key);
static inline void detach_timer(struct timer_list *timer,
int clear_pending)
*/
int del_timer_sync(struct timer_list *timer)
{
+#ifdef CONFIG_LOCKDEP
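+ /*
+ * Acquire and release the timer's lockdep map so lockdep sees a
+ * dependency from every lock held by this caller to the timer
+ * callback: waiting here while holding a lock the callback also
+ * takes would deadlock, and lockdep can now report it.
+ */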
+ unsigned long flags;
+
+ local_irq_save(flags);
+ lock_map_acquire(&timer->lockdep_map);
+ lock_map_release(&timer->lockdep_map);
+ local_irq_restore(flags);
+#endif
+
for (;;) {
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
set_running_timer(base, timer);
detach_timer(timer, 1);
+
spin_unlock_irq(&base->lock);
{
int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+ /*
+ * It is permissible to free the timer from
+ * inside the function that is called from
+ * it; we need to take this into account for
+ * lockdep too. To avoid bogus "held lock
+ * freed" warnings as well as problems when
+ * looking into timer->lockdep_map, make a
+ * copy and use that here.
+ */
+ struct lockdep_map lockdep_map =
+ timer->lockdep_map;
+#endif
+ /*
+ * Couple the lock chain with the lock chain at
+ * del_timer_sync() by acquiring the lock_map
+ * around the fn() call here and in
+ * del_timer_sync().
+ */
+ lock_map_acquire(&lockdep_map);
+
fn(data);
+
+ lock_map_release(&lockdep_map);
+
if (preempt_count != preempt_count()) {
printk(KERN_ERR "huh, entered %p "
"with preempt_count %08x, exited"