Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 110 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 95 insertions(+), 15 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 34ed0d99b1bd..331f4502e92b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -24,6 +24,8 @@
#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
#define CLONE_STOPPED 0x02000000 /* Start in stopped state */
+#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
+#define CLONE_NEWIPC 0x08000000 /* New ipcs */
/*
* Scheduling policies
@@ -118,7 +120,6 @@ extern unsigned long avenrun[]; /* Load averages */
extern unsigned long total_forks;
extern int nr_threads;
-extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
@@ -148,6 +149,7 @@ extern unsigned long weighted_cpuload(const int cpu);
#define EXIT_DEAD 32
/* in tsk->state again */
#define TASK_NONINTERACTIVE 64
+#define TASK_DEAD 128
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
@@ -238,7 +240,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
-struct namespace;
+struct nsproxy;
/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT 65536
@@ -504,8 +506,8 @@ struct signal_struct {
#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
#define rt_task(p) rt_prio((p)->prio)
#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
-#define has_rt_policy(p) \
- unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
+#define is_rt_policy(p) ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
+#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
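The split means is_rt_policy() now takes a bare policy value, so a candidate policy (say, an argument headed for sched_setscheduler()) can be tested before it is ever stored in a task, while has_rt_policy() keeps the old task-based behaviour. A minimal sketch of the distinction; the helper name is hypothetical:

/* hypothetical helper: validate a raw policy value, no task needed */
static inline int sketch_policy_is_realtime(int policy)
{
	/* is_rt_policy() operates on the value itself... */
	return is_rt_policy(policy);
	/* ...has_rt_policy(p) would instead read p->policy */
}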
/*
* Some day this will be a full-fledged user tracking system..
@@ -623,9 +625,17 @@ enum idle_type
#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
-#define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \
- ? SD_POWERSAVINGS_BALANCE : 0)
+#define BALANCE_FOR_MC_POWER \
+ (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
+
+#define BALANCE_FOR_PKG_POWER \
+ ((sched_mc_power_savings || sched_smt_power_savings) ? \
+ SD_POWERSAVINGS_BALANCE : 0)
+
+#define test_sd_parent(sd, flag) ((sd->parent && \
+ (sd->parent->flags & flag)) ? 1 : 0)
struct sched_group {
@@ -642,6 +652,7 @@ struct sched_group {
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
+ struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
cpumask_t span; /* span of all CPUs in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
@@ -709,7 +720,6 @@ extern unsigned int max_cache_size;
struct io_context; /* See blkdev.h */
-void exit_io_context(void);
struct cpuset;
#define NGROUPS_SMALL 32
@@ -754,6 +764,7 @@ static inline void prefetch_stack(struct task_struct *t) { }
struct audit_context; /* See audit.c */
struct mempolicy;
struct pipe_inode_info;
+struct uts_namespace;
enum sleep_type {
SLEEP_NORMAL,
@@ -784,8 +795,9 @@ struct task_struct {
struct prio_array *array;
unsigned short ioprio;
+#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
-
+#endif
unsigned long sleep_avg;
unsigned long long timestamp, last_ran;
unsigned long long sched_time; /* sched_clock time spent running */
@@ -819,6 +831,11 @@ struct task_struct {
unsigned did_exec:1;
pid_t pid;
pid_t tgid;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+ /* Canary value for the -fstack-protector gcc feature */
+ unsigned long stack_canary;
+#endif
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
@@ -865,6 +882,15 @@ struct task_struct {
struct key *thread_keyring; /* keyring private to this thread */
unsigned char jit_keyring; /* default keyring to attach requested keys to */
#endif
+ /*
+ * fpu_counter contains the number of consecutive context switches
+ * during which the FPU is used. If this is over a threshold, the
+ * lazy FPU saving becomes unlazy to save the trap. This is an
+ * unsigned char so that after 256 switches the counter wraps and
+ * the behavior turns lazy again; this is to deal with bursty apps
+ * that only use the FPU for a short time.
+ */
+ unsigned char fpu_counter;
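Only the field is declared here; maintaining it is left to the arch context-switch code. A hedged sketch of the heuristic the comment describes -- the threshold and the restore helper are illustrative names, not part of this patch:

/* illustrative sketch of the unlazy-FPU heuristic */
#define SKETCH_FPU_UNLAZY_THRESHOLD	5	/* hypothetical value */

static void sketch_restore_fpu(struct task_struct *next);	/* hypothetical */

static void sketch_switch_fpu(struct task_struct *next)
{
	/* a task that used the FPU on many consecutive switches gets
	 * its FPU state restored eagerly, avoiding the lazy-restore
	 * trap; below the threshold the lazy path is kept, and the
	 * unsigned char counter wraps at 256 so bursty tasks fall
	 * back to lazy behaviour on their own */
	if (next->fpu_counter > SKETCH_FPU_UNLAZY_THRESHOLD)
		sketch_restore_fpu(next);
}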
int oomkilladj; /* OOM kill score adjustment (bit shift). */
char comm[TASK_COMM_LEN]; /* executable name excluding path
     - access with [gs]et_task_comm (which lock
@@ -872,16 +898,18 @@ struct task_struct {
       it with task_lock())
     - initialized normally by flush_old_exec */
/* file system info */
int link_count, total_link_count;
+#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
+#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
struct files_struct *files;
-/* namespace */
- struct namespace *namespace;
+/* namespaces */
+ struct nsproxy *nsproxy;
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -964,10 +992,10 @@ struct task_struct {
wait_queue_t *io_wait;
/* i/o counters (bytes read/written, #syscalls) */
u64 rchar, wchar, syscr, syscw;
-#if defined(CONFIG_BSD_PROCESS_ACCT)
+#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
- clock_t acct_stimexpd; /* clock_t-converted stime since last update */
+ cputime_t acct_stimexpd;/* stime since last update */
#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy;
@@ -1003,6 +1031,26 @@ static inline pid_t process_group(struct task_struct *tsk)
return tsk->signal->pgrp;
}
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PGID].pid;
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_SID].pid;
+}
+
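All four accessors return a struct pid rather than a number; a numeric id can still be recovered through pid_nr() from linux/pid.h. A short usage sketch (the function itself is illustrative):

/* illustrative: report a task's session via the new accessors */
static inline void sketch_show_session(struct task_struct *tsk)
{
	struct pid *sid = task_session(tsk);

	/* pid_nr() maps a struct pid back to its numeric value
	 * and tolerates a NULL argument */
	printk(KERN_DEBUG "%s: session %d\n", tsk->comm, pid_nr(sid));
}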
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
@@ -1016,6 +1064,18 @@ static inline int pid_alive(struct task_struct *p)
return p->pids[PIDTYPE_PID].pid != NULL;
}
+/**
+ * is_init - check if a task structure is the first user space
+ * task the kernel created.
+ * @tsk: Task structure to be checked.
+ */
+static inline int is_init(struct task_struct *tsk)
+{
+ return tsk->pid == 1;
+}
+
+extern struct pid *cad_pid;
+
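is_init() is intended to replace open-coded pid == 1 tests. A hedged sketch of the kind of caller it serves (the function below is illustrative, not from this patch):

/* illustrative: init must never be chosen as an OOM victim */
static inline int sketch_oom_unkillable(struct task_struct *p)
{
	/* killing the first userspace task takes the system down */
	return is_init(p);
}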
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -1034,7 +1094,6 @@ static inline void put_task_struct(struct task_struct *t)
/* Not implemented yet, only for 486 */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
-#define PF_DEAD 0x00000008 /* Dead */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
@@ -1179,7 +1238,7 @@ extern void switch_uid(struct user_struct *);
#include <asm/current.h>
-extern void do_timer(struct pt_regs *);
+extern void do_timer(unsigned long ticks);
extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
@@ -1221,10 +1280,15 @@ extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
+extern int kill_pgrp(struct pid *pid, int sig, int priv);
+extern int kill_pid(struct pid *pid, int sig, int priv);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
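The struct pid based entry points added above (kill_pgrp_info(), kill_pid_info(), kill_pgrp(), kill_pid()) parallel the older pid_t interfaces but pin their target, so a recycled numeric pid cannot be signalled by mistake. A hedged migration sketch:

/* illustrative: hang up a task's process group via struct pid */
static inline int sketch_hangup_pgrp(struct task_struct *tsk)
{
	/* task_pgrp() yields a struct pid, so no pid_t round trip */
	return kill_pgrp(task_pgrp(tsk), SIGHUP, 1);
}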
@@ -1239,6 +1303,11 @@ extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
+static inline int kill_cad_pid(int sig, int priv)
+{
+ return kill_pid(cad_pid, sig, priv);
+}
+
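kill_cad_pid() packages the common Ctrl-Alt-Del delivery; a hedged sketch of the sort of call site it is meant for (the surrounding handler is illustrative):

/* illustrative: forward Ctrl-Alt-Del to the registered cad_pid task */
static inline void sketch_ctrl_alt_del(void)
{
	/* cad_pid defaults to init, which decides whether the
	 * keypress means reboot or should be ignored */
	kill_cad_pid(SIGINT, 1);
}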
/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
@@ -1332,6 +1401,17 @@ extern void wait_task_inactive(struct task_struct * p);
/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p) (p == p->group_leader)
+/* Due to the insanities of de_thread it is possible for a process
+ * to have the pid of the thread group leader without actually being
+ * the thread group leader. For iteration through the pids in proc
+ * all we care about is that we have a task with the appropriate
+ * pid; we don't actually care if we have the right task.
+ */
+static inline int has_group_leader_pid(struct task_struct *p)
+{
+ return p->pid == p->tgid;
+}
+
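A hedged sketch of the /proc-style check the comment above has in mind (the function is illustrative, not the actual fs/proc code):

/* illustrative: accept any task that carries the leader's pid */
static inline int sketch_pid_iteration_match(struct task_struct *p)
{
	/* after de_thread() the task holding the leader pid may not
	 * be thread_group_leader(); for pid iteration that is fine */
	return has_group_leader_pid(p);
}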
static inline struct task_struct *next_thread(const struct task_struct *p)
{
return list_entry(rcu_dereference(p->thread_group.next),