#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
+#include <linux/futex.h>
#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
extern void scheduler_tick(void);
#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(struct pt_regs *regs);
+extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
-static inline void softlockup_tick(struct pt_regs *regs)
+static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
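With the pt_regs argument gone, softlockup_tick() can be called from any per-tick path without a register snapshot, and code that deliberately stalls a CPU resets the detector via touch_softlockup_watchdog(). A minimal sketch of the latter; slow_hw_operation() is a hypothetical stand-in for a long busy-wait:

/* hypothetical: a multi-second polling loop, e.g. while reprogramming flash */
static void slow_hw_operation(void);

static void example_flash_reprogram(void)
{
	slow_hw_operation();
	touch_softlockup_watchdog();	/* this stall was intentional; don't warn */
}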
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
- unsigned long mmap_base; /* base of mmap area */
- unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long task_size; /* size of task vm space */
+ unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
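The new task_size field records the per-task VM span, while cached_hole_size and free_area_cache cooperate so mmap() need not rescan the low address space on every call. A simplified sketch of the start-of-scan decision, modeled on arch_get_unmapped_area() (not verbatim):

/* decide where to begin scanning for a free range of 'len' bytes */
static unsigned long example_scan_start(struct mm_struct *mm, unsigned long len)
{
	if (len > mm->cached_hole_size)
		/* every hole below free_area_cache is known to be too small */
		return mm->free_area_cache;

	/* a big-enough hole may exist lower down: rescan from the base */
	mm->cached_hole_size = 0;
	return TASK_UNMAPPED_BASE;
}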
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
+ struct task_struct *tsk;
ktime_t it_real_incr;
/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
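The tsk back-pointer beside real_timer exists because an hrtimer callback now receives only the timer pointer: container_of() recovers the enclosing signal_struct, and sig->tsk names the task to signal. An illustrative sketch; the in-tree callback is it_real_fn() in kernel/itimer.c, whose exact signature may differ:

static int example_real_timer_fn(struct hrtimer *timer)
{
	struct signal_struct *sig =
		container_of(timer, struct signal_struct, real_timer);

	/* deliver ITIMER_REAL expiry to the whole thread group */
	send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);
	return HRTIMER_NORESTART;
}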
int lock_depth; /* BKL lock depth */
-#if defined(CONFIG_SMP)
- int last_waker_cpu; /* CPU that last woke this task up */
-#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
int oncpu;
-#endif
#endif
int prio, static_prio;
struct list_head run_list;
prio_array_t *array;
unsigned short ioprio;
+ unsigned int btrace_seq;
unsigned long sleep_avg;
unsigned long long timestamp, last_ran;
struct cpuset *cpuset;
nodemask_t mems_allowed;
int cpuset_mems_generation;
+ int cpuset_mem_spread_rotor;
+#endif
+ struct robust_list_head __user *robust_list;
+#ifdef CONFIG_COMPAT
+ struct compat_robust_list_head __user *compat_robust_list;
#endif
+
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
};
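Each thread may register one robust-futex list head, which the kernel walks at exit to mark held locks with FUTEX_OWNER_DIED. A hedged userspace sketch of the registration; glibc normally issues this per thread, and SYS_set_robust_list is assumed to be defined by the toolchain:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* an empty robust list points back at its own head */
static struct robust_list_head head = {
	.list		 = { &head.list },
	.futex_offset	 = 0,
	.list_op_pending = NULL,
};

int main(void)
{
	/* the kernel stores this pointer in current->robust_list */
	return syscall(SYS_set_robust_list, &head, sizeof(head)) ? 1 : 0;
}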
}
extern void free_task(struct task_struct *tsk);
-extern void __put_task_struct(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct_cb(struct rcu_head *rhp);
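With __put_task_struct() gone, the final reference drop defers the actual free through the new rcu field. The era's put_task_struct() presumably pairs with the callback along these lines (a sketch, not the verbatim header):

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		call_rcu(&t->rcu, __put_task_struct_cb);	/* free after a grace period */
}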
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */
+#define PF_SPREAD_PAGE 0x04000000 /* Spread page cache over cpuset */
+#define PF_SPREAD_SLAB 0x08000000 /* Spread some slab caches over cpuset */
+#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
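The two spread bits are tested on current at allocation time to round-robin page-cache and slab pages across the cpuset's memory nodes; the cpuset helpers of this era (cpuset_do_page_mem_spread() and friends in include/linux/cpuset.h) reduce to flag tests along these lines (sketch, names are illustrative):

static inline int example_do_page_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int example_do_slab_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}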
/*
* Only the _current_ task can read/write to tsk->flags, but other
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
-extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
-extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
+extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
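The const was dropped from do_sigaction()'s second argument because the function may now edit the passed k_sigaction in place, e.g. deleting SIGKILL/SIGSTOP from sa_mask. Callers pass a kernel-side copy anyway, as in this condensed sketch along the lines of sys_rt_sigaction() (simplified, not verbatim):

long example_rt_sigaction(int sig, const struct sigaction __user *act,
			  struct sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act && copy_from_user(&new_ka.sa, act, sizeof(new_ka.sa)))
		return -EFAULT;

	/* new_ka is a kernel copy, so do_sigaction() is free to modify it */
	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact && copy_to_user(oact, &old_ka.sa, sizeof(old_ka.sa)))
		return -EFAULT;
	return ret;
}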
/* These can be the second arg to send_sig_info/send_group_sig_info. */
#endif
#define remove_parent(p) list_del_init(&(p)->sibling)
-#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children)
-
-#define REMOVE_LINKS(p) do { \
- if (thread_group_leader(p)) \
- list_del_init(&(p)->tasks); \
- remove_parent(p); \
- } while (0)
-
-#define SET_LINKS(p) do { \
- if (thread_group_leader(p)) \
- list_add_tail(&(p)->tasks,&init_task.tasks); \
- add_parent(p, (p)->parent); \
- } while (0)
+#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
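add_parent() now takes the parent from the task itself, and REMOVE_LINKS/SET_LINKS disappear in favour of open-coded sequences at their call sites: set p->parent first, then link. A sketch of the attach side that replaces SET_LINKS (modeled on the fork path, simplified):

static inline void example_attach_task(struct task_struct *p)
{
	if (thread_group_leader(p))
		list_add_tail(&p->tasks, &init_task.tasks);
	add_parent(p);		/* links p->sibling into p->parent->children */
}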
#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
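next_task() is also the building block for whole-system iteration; the classic for_each_process() loop from this same header walks the circular list anchored at init_task:

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )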
-extern void unhash_process(struct task_struct *p);
-
/*
* Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also