extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
extern unsigned long get_parent_ip(unsigned long addr);
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
+#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+#define SUID_DUMP_USER 1 /* Dump as user of process */
+#define SUID_DUMP_ROOT 2 /* Dump as root */
+
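+/*
+ * Illustrative sketch (not part of this patch): a coredump path might
+ * map the SUID_DUMP_* values onto a dumping decision roughly like so,
+ * where the dump_as_*() helpers are hypothetical:
+ *
+ *	switch (get_dumpable(mm)) {
+ *	case SUID_DUMP_DISABLE:
+ *		return;
+ *	case SUID_DUMP_USER:
+ *		dump_as_task_creds();
+ *		break;
+ *	case SUID_DUMP_ROOT:
+ *		dump_as_root_creds();
+ *		break;
+ *	}
+ */
+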
/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm? */
+ unsigned long unix_inflight; /* How many files in flight in unix sockets */
+	atomic_long_t pipe_bufs; /* How many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
+ unsigned long watchdog_stamp;
unsigned int time_slice;
int nr_cpus_allowed;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+#ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
+ unsigned long atomic_flags; /* Flags needing atomic access. */
+
pid_t pid;
pid_t tgid;
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
- int mems_allowed_change_disable;
+	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
#endif
- struct prop_local_single dirties;
/*
* when (nr_dirtied >= nr_dirtied_pause), it's time to call
* balance_dirty_pages() for some dirty throttling pause
}
+static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
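+/*
+ * Illustrative sketch (assumed caller, e.g. a /proc status printer with
+ * an assumed seq_file *m): the helper can be used lock-free thanks to
+ * the RCU read section it takes internally:
+ *
+ *	pid_t ppid = task_ppid_nr(tsk);
+ *	seq_printf(m, "PPid:\t%d\n", ppid);
+ *
+ * A result of 0 means the task was already dead (pid_alive() failed),
+ * so no stale parent pointer was dereferenced.
+ */
+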
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*/
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
{
return p->pids[PIDTYPE_PID].pid != NULL;
}
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
+/* Per-process atomic flags. */
+#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+
+#define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+ { return test_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_SET(name, func) \
+ static inline void task_set_##func(struct task_struct *p) \
+ { set_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_CLEAR(name, func) \
+ static inline void task_clear_##func(struct task_struct *p) \
+ { clear_bit(PFA_##name, &p->atomic_flags); }
+
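+/*
+ * For example, TASK_PFA_TEST(SPREAD_PAGE, spread_page) expands to:
+ *
+ *	static inline bool task_spread_page(struct task_struct *p)
+ *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
+ *
+ * giving each per-process atomic flag test/set/clear accessors built on
+ * the generic atomic bitops.
+ */
+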
/*
* task->jobctl flags
*/
}
#endif
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
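+/*
+ * Sketch of the intended call pattern (assumed call sites in the NO_HZ
+ * idle path; they are outside this hunk):
+ *
+ *	calc_load_enter_idle();		fold this CPU's load contribution
+ *	... tick stopped, CPU sleeps ...
+ *	calc_load_exit_idle();		rejoin global load accounting
+ *
+ * The !CONFIG_NO_HZ stubs let those call sites stay unconditional.
+ */
+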
+
#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
}
#endif
+TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+TASK_PFA_SET(SPREAD_PAGE, spread_page)
+TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+
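+/*
+ * Illustrative usage (hypothetical caller): an allocation path deciding
+ * whether to spread page-cache pages over the task's cpuset might do:
+ *
+ *	if (task_spread_page(current))
+ *		node = cpuset_mem_spread_node();
+ *
+ * Unlike the old PF_SPREAD_* bits in ->flags, which only the task itself
+ * could safely modify, these bits live in ->atomic_flags and can be
+ * toggled from other tasks (e.g. on cpuset updates).
+ */
+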
/*
* Do not use outside of architecture code which knows its limitations.
*
* all we care about is that we have a task with the appropriate
* pid, we don't actually care if we have the right task.
*/
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
{
- return p->pid == p->tgid;
+ return task_pid(p) == p->signal->leader_pid;
}
static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
- return p1->tgid == p2->tgid;
+ return p1->signal == p2->signal;
}
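+/*
+ * Illustrative sketch: threads of one process share ->signal, so this
+ * pointer comparison is cheap and independent of pid numbers:
+ *
+ *	if (same_thread_group(current, p))
+ *		...  p runs in the current process  ...
+ */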
static inline struct task_struct *next_thread(const struct task_struct *p)
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
+
+static inline void signal_wake_up(struct task_struct *t, bool resume)
+{
+ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
+{
+ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+}
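+
+/*
+ * Illustrative sketch: the wrappers differ only in which extra sleep
+ * state they can wake:
+ *
+ *	signal_wake_up(t, true);		also wakes TASK_WAKEKILL sleeps
+ *	ptrace_signal_wake_up(child, true);	also wakes a __TASK_TRACED tracee
+ *
+ * Passing false wakes only TASK_INTERRUPTIBLE sleeps in both cases.
+ */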
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
-#endif
+#endif /* CONFIG_CGROUP_SCHED */
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);