sched.c File Reference


Classes

struct  rt_prio_array
struct  rt_bandwidth
struct  cfs_rq
struct  rt_rq
struct  rq
struct  rq_iterator

Defines

#define NICE_TO_PRIO(nice)   (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)   ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)   PRIO_TO_NICE((p)->static_prio)
#define USER_PRIO(p)   ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)   USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO   (USER_PRIO(MAX_PRIO))
#define NS_TO_JIFFIES(TIME)   ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
#define NICE_0_LOAD   SCHED_LOAD_SCALE
#define NICE_0_SHIFT   SCHED_LOAD_SHIFT
#define DEF_TIMESLICE   (100 * HZ / 1000)
#define RUNTIME_INF   ((u64)~0ULL)
#define CPU_LOAD_IDX_MAX   5
#define for_each_domain(cpu, __sd)   for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
#define cpu_rq(cpu)   (&per_cpu(runqueues, (cpu)))
#define this_rq()   (&__get_cpu_var(runqueues))
#define task_rq(p)   cpu_rq(task_cpu(p))
#define cpu_curr(cpu)   (cpu_rq(cpu)->curr)
#define const_debug   static const
#define SCHED_FEAT(name, enabled)   __SCHED_FEAT_##name ,
#define SCHED_FEAT(name, enabled)   (1UL << __SCHED_FEAT_##name) * enabled |
#define sched_feat(x)   (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#define prepare_arch_switch(next)   do { } while (0)
#define finish_arch_switch(prev)   do { } while (0)
#define WMULT_CONST   (1UL << 32)
#define WMULT_SHIFT   32
#define SRR(x, y)   (((x) + (1UL << ((y) - 1))) >> (y))
#define WEIGHT_IDLEPRIO   3
#define WMULT_IDLEPRIO   1431655765
#define sched_class_highest   (&rt_sched_class)
#define for_each_class(class)   for (class = sched_class_highest; class; class = class->next)

Enumerations

enum  

Functions

unsigned long long __attribute__ ((weak))
 DDE only uses small parts of this.
 DEFINE_TRACE (sched_wait_task)
 DEFINE_TRACE (sched_wakeup)
 DEFINE_TRACE (sched_wakeup_new)
 DEFINE_TRACE (sched_switch)
 DEFINE_TRACE (sched_migrate_task)
static int rt_policy (int policy)
static int task_has_rt_policy (struct task_struct *p)
static int do_sched_rt_period_timer (struct rt_bandwidth *rt_b, int overrun)
static enum hrtimer_restart sched_rt_period_timer (struct hrtimer *timer)
static void init_rt_bandwidth (struct rt_bandwidth *rt_b, u64 period, u64 runtime)
static int rt_bandwidth_enabled (void)
static void start_rt_bandwidth (struct rt_bandwidth *rt_b)
static DEFINE_MUTEX (sched_domains_mutex)
static void set_task_rq (struct task_struct *p, unsigned int cpu)
static struct task_group * task_group (struct task_struct *p)
static DEFINE_PER_CPU_SHARED_ALIGNED (struct rq, runqueues)
static void check_preempt_curr (struct rq *rq, struct task_struct *p, int sync)
static int cpu_of (struct rq *rq)
static void update_rq_clock (struct rq *rq)
int runqueue_is_locked (void)
 runqueue_is_locked
static u64 global_rt_period (void)
static u64 global_rt_runtime (void)
static int task_current (struct rq *rq, struct task_struct *p)
static int task_running (struct rq *rq, struct task_struct *p)
static void prepare_lock_switch (struct rq *rq, struct task_struct *next)
static void finish_lock_switch (struct rq *rq, struct task_struct *prev)
static struct rq * __task_rq_lock (struct task_struct *p) __acquires(rq->lock)
static struct rq * task_rq_lock (struct task_struct *p, unsigned long *flags) __acquires(rq->lock)
void task_rq_unlock_wait (struct task_struct *p)
static void __task_rq_unlock (struct rq *rq) __releases(rq->lock)
static void task_rq_unlock (struct rq *rq, unsigned long *flags) __releases(rq->lock)
static struct rq * this_rq_lock (void)
static void hrtick_clear (struct rq *rq)
static void init_rq_hrtick (struct rq *rq)
static void init_hrtick (void)
static void resched_task (struct task_struct *p)
static unsigned long calc_delta_mine (unsigned long delta_exec, unsigned long weight, struct load_weight *lw)
static void update_load_add (struct load_weight *lw, unsigned long inc)
static void update_load_sub (struct load_weight *lw, unsigned long dec)
static void activate_task (struct rq *rq, struct task_struct *p, int wakeup)
static void cpuacct_charge (struct task_struct *tsk, u64 cputime)
static void inc_cpu_load (struct rq *rq, unsigned long load)
static void dec_cpu_load (struct rq *rq, unsigned long load)
static void inc_nr_running (struct rq *rq)
static void dec_nr_running (struct rq *rq)
static void set_load_weight (struct task_struct *p)
static void update_avg (u64 *avg, u64 sample)
static void enqueue_task (struct rq *rq, struct task_struct *p, int wakeup)
static void dequeue_task (struct rq *rq, struct task_struct *p, int sleep)
static int __normal_prio (struct task_struct *p)
static int normal_prio (struct task_struct *p)
static int effective_prio (struct task_struct *p)
static void deactivate_task (struct rq *rq, struct task_struct *p, int sleep)
int task_curr (const struct task_struct *p)
 task_curr - is this task currently executing on a CPU? : the task in question.
static void __set_task_cpu (struct task_struct *p, unsigned int cpu)
static void check_class_changed (struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, int oldprio, int running)
static int try_to_wake_up (struct task_struct *p, unsigned int state, int sync)
int wake_up_process (struct task_struct *p)
 EXPORT_SYMBOL (wake_up_process)
int wake_up_state (struct task_struct *p, unsigned int state)
static void __sched_fork (struct task_struct *p)
void sched_fork (struct task_struct *p, int clone_flags)
void wake_up_new_task (struct task_struct *p, unsigned long clone_flags)
static void fire_sched_in_preempt_notifiers (struct task_struct *curr)
static void fire_sched_out_preempt_notifiers (struct task_struct *curr, struct task_struct *next)
static void prepare_task_switch (struct rq *rq, struct task_struct *prev, struct task_struct *next)
 prepare_task_switch - prepare to switch tasks : the runqueue preparing to switch : the current task that is being switched out : the task we are going to switch to.
static void finish_task_switch (struct rq *rq, struct task_struct *prev) __releases(rq->lock)
 finish_task_switch - clean up after a task-switch : runqueue associated with task-switch : the thread we just switched away from.
asmlinkage void schedule_tail (struct task_struct *prev) __releases(rq->lock)
 schedule_tail - first thing a freshly forked thread must call.
static void context_switch (struct rq *rq, struct task_struct *prev, struct task_struct *next)
unsigned long nr_running (void)
unsigned long nr_uninterruptible (void)
unsigned long long nr_context_switches (void)
unsigned long nr_iowait (void)
unsigned long nr_active (void)
static void update_cpu_load (struct rq *this_rq)
static void idle_balance (int cpu, struct rq *rq)
 DEFINE_PER_CPU (struct kernel_stat, kstat)
 EXPORT_PER_CPU_SYMBOL (kstat)
unsigned long long task_delta_exec (struct task_struct *p)
void account_user_time (struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled)
static void account_guest_time (struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled)
void account_system_time (struct task_struct *p, int hardirq_offset, cputime_t cputime, cputime_t cputime_scaled)
void account_steal_time (cputime_t cputime)
void account_idle_time (cputime_t cputime)
void account_process_tick (struct task_struct *p, int user_tick)
void account_steal_ticks (unsigned long ticks)
void account_idle_ticks (unsigned long ticks)
cputime_t task_utime (struct task_struct *p)
cputime_t task_stime (struct task_struct *p)
cputime_t task_gtime (struct task_struct *p)
void scheduler_tick (void)
static noinline void __schedule_bug (struct task_struct *prev)
static void schedule_debug (struct task_struct *prev)
static struct task_struct * pick_next_task (struct rq *rq, struct task_struct *prev)
asmlinkage void __sched schedule (void)
 EXPORT_SYMBOL (schedule)
int default_wake_function (wait_queue_t *curr, unsigned mode, int sync, void *key)
 EXPORT_SYMBOL (default_wake_function)
void __wake_up_common (wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync, void *key)
void __wake_up (wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key)
 __wake_up - wake up threads blocked on a waitqueue.
 EXPORT_SYMBOL (__wake_up)
void __wake_up_locked (wait_queue_head_t *q, unsigned int mode)
void __wake_up_sync (wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 __wake_up_sync - wake up threads blocked on a waitqueue.
 EXPORT_SYMBOL_GPL (__wake_up_sync)
void complete (struct completion *x)
 complete: - signals a single thread waiting on this completion : holds the state of this particular completion
 EXPORT_SYMBOL (complete)
void complete_all (struct completion *x)
 complete_all: - signals all threads waiting on this completion : holds the state of this particular completion
 EXPORT_SYMBOL (complete_all)
static long __sched do_wait_for_common (struct completion *x, long timeout, int state)
static long __sched wait_for_common (struct completion *x, long timeout, int state)
void __sched wait_for_completion (struct completion *x)
 wait_for_completion: - waits for completion of a task : holds the state of this particular completion
 EXPORT_SYMBOL (wait_for_completion)
unsigned long __sched wait_for_completion_timeout (struct completion *x, unsigned long timeout)
 wait_for_completion_timeout: - waits for completion of a task (w/timeout) : holds the state of this particular completion : timeout value in jiffies
 EXPORT_SYMBOL (wait_for_completion_timeout)
int __sched wait_for_completion_interruptible (struct completion *x)
 wait_for_completion_interruptible: - waits for completion of a task (w/intr) : holds the state of this particular completion
 EXPORT_SYMBOL (wait_for_completion_interruptible)
unsigned long __sched wait_for_completion_interruptible_timeout (struct completion *x, unsigned long timeout)
 wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) : holds the state of this particular completion : timeout value in jiffies
 EXPORT_SYMBOL (wait_for_completion_interruptible_timeout)
int __sched wait_for_completion_killable (struct completion *x)
 wait_for_completion_killable: - waits for completion of a task (killable) : holds the state of this particular completion
 EXPORT_SYMBOL (wait_for_completion_killable)
bool try_wait_for_completion (struct completion *x)
 try_wait_for_completion - try to decrement a completion without blocking : completion structure
 EXPORT_SYMBOL (try_wait_for_completion)
bool completion_done (struct completion *x)
 completion_done - Test to see if a completion has any waiters : completion structure
 EXPORT_SYMBOL (completion_done)
static long __sched sleep_on_common (wait_queue_head_t *q, int state, long timeout)
void __sched interruptible_sleep_on (wait_queue_head_t *q)
 EXPORT_SYMBOL (interruptible_sleep_on)
long __sched interruptible_sleep_on_timeout (wait_queue_head_t *q, long timeout)
 EXPORT_SYMBOL (interruptible_sleep_on_timeout)
void __sched sleep_on (wait_queue_head_t *q)
 EXPORT_SYMBOL (sleep_on)
long __sched sleep_on_timeout (wait_queue_head_t *q, long timeout)
 EXPORT_SYMBOL (sleep_on_timeout)
void set_user_nice (struct task_struct *p, long nice)
 EXPORT_SYMBOL (set_user_nice)
int can_nice (const struct task_struct *p, const int nice)
int task_prio (const struct task_struct *p)
 task_prio - return the priority value of a given task.
int task_nice (const struct task_struct *p)
 task_nice - return the nice value of a given task.
 EXPORT_SYMBOL (task_nice)
int idle_cpu (int cpu)
 idle_cpu - is a given cpu idle currently? : the processor in question.
struct task_struct * idle_task (int cpu)
 idle_task - return the idle task for a given cpu.
static struct task_struct * find_process_by_pid (pid_t pid)
 find_process_by_pid - find a process with a matching PID value.
static void __setscheduler (struct rq *rq, struct task_struct *p, int policy, int prio)
static bool check_same_owner (struct task_struct *p)
static int __sched_setscheduler (struct task_struct *p, int policy, struct sched_param *param, bool user)
int sched_setscheduler (struct task_struct *p, int policy, struct sched_param *param)
 sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 EXPORT_SYMBOL_GPL (sched_setscheduler)
int sched_setscheduler_nocheck (struct task_struct *p, int policy, struct sched_param *param)
 sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
static int do_sched_setscheduler (pid_t pid, int policy, struct sched_param __user *param)
 SYSCALL_DEFINE3 (sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
 sys_sched_setscheduler - set/change the scheduler policy and RT priority : the pid in question.
 SYSCALL_DEFINE2 (sched_setparam, pid_t, pid, struct sched_param __user *, param)
 sys_sched_setparam - set/change the RT priority of a thread : the pid in question.
 SYSCALL_DEFINE1 (sched_getscheduler, pid_t, pid)
 sys_sched_getscheduler - get the policy (scheduling class) of a thread : the pid in question.
 SYSCALL_DEFINE2 (sched_getparam, pid_t, pid, struct sched_param __user *, param)
 sys_sched_getparam - get the RT priority of a thread : the pid in question.
long sched_setaffinity (pid_t pid, const struct cpumask *in_mask)
static int get_user_cpu_mask (unsigned long __user *user_mask_ptr, unsigned len, struct cpumask *new_mask)
 SYSCALL_DEFINE3 (sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr)
 sys_sched_setaffinity - set the cpu affinity of a process : pid of the process : length in bytes of the bitmask pointed to by user_mask_ptr : user-space pointer to the new cpu mask
long sched_getaffinity (pid_t pid, struct cpumask *mask)
 SYSCALL_DEFINE3 (sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr)
 sys_sched_getaffinity - get the cpu affinity of a process : pid of the process : length in bytes of the bitmask pointed to by user_mask_ptr : user-space pointer to hold the current cpu mask
 SYSCALL_DEFINE0 (sched_yield)
 sys_sched_yield - yield the current processor to other threads.
static void __cond_resched (void)
int __sched _cond_resched (void)
 EXPORT_SYMBOL (_cond_resched)
int cond_resched_lock (spinlock_t *lock)
 EXPORT_SYMBOL (cond_resched_lock)
int __sched cond_resched_softirq (void)
 EXPORT_SYMBOL (cond_resched_softirq)
void __sched yield (void)
 yield - yield the current processor to other threads.
 EXPORT_SYMBOL (yield)
void __sched io_schedule (void)
 EXPORT_SYMBOL (io_schedule)
long __sched io_schedule_timeout (long timeout)
 SYSCALL_DEFINE1 (sched_get_priority_max, int, policy)
 sys_sched_get_priority_max - return maximum RT priority.
 SYSCALL_DEFINE1 (sched_get_priority_min, int, policy)
 sys_sched_get_priority_min - return minimum RT priority.
 SYSCALL_DEFINE2 (sched_rr_get_interval, pid_t, pid, struct timespec __user *, interval)
 sys_sched_rr_get_interval - return the default timeslice of a process.
void sched_show_task (struct task_struct *p)
void show_state_filter (unsigned long state_filter)
void __cpuinit init_idle_bootup_task (struct task_struct *idle)
void __cpuinit init_idle (struct task_struct *idle, int cpu)
 init_idle - set up an idle thread for a given CPU : task in question : cpu the idle task belongs to
static void sched_init_granularity (void)
void __init sched_init_smp (void)
int in_sched_functions (unsigned long addr)
static void init_cfs_rq (struct cfs_rq *cfs_rq, struct rq *rq)
static void init_rt_rq (struct rt_rq *rt_rq, struct rq *rq)
void __init sched_init (void)
static void free_fair_sched_group (struct task_group *tg)
static int alloc_fair_sched_group (struct task_group *tg, struct task_group *parent)
static void register_fair_sched_group (struct task_group *tg, int cpu)
static void unregister_fair_sched_group (struct task_group *tg, int cpu)
static void free_rt_sched_group (struct task_group *tg)
static int alloc_rt_sched_group (struct task_group *tg, struct task_group *parent)
static void register_rt_sched_group (struct task_group *tg, int cpu)
static void unregister_rt_sched_group (struct task_group *tg, int cpu)
static int sched_rt_global_constraints (void)
int sched_rt_handler (struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)

Variables

static struct rt_bandwidth def_rt_bandwidth
const_debug unsigned int sysctl_sched_features
const_debug unsigned int sysctl_sched_nr_migrate = 32
unsigned int sysctl_sched_shares_ratelimit = 250000
unsigned int sysctl_sched_shares_thresh = 4
unsigned int sysctl_sched_rt_period = 1000000
static __read_mostly int scheduler_running
int sysctl_sched_rt_runtime = 950000
static const int prio_to_weight [40]
static const u32 prio_to_wmult [40]
static const char stat_nam [] = TASK_STATE_TO_CHAR_STR
cpumask_var_t nohz_cpu_mask


Define Documentation

#define const_debug   static const

Definition at line 699 of file kernel/sched.c.

#define cpu_curr ( cpu   )     (cpu_rq(cpu)->curr)

Definition at line 686 of file kernel/sched.c.

#define CPU_LOAD_IDX_MAX   5

Definition at line 567 of file kernel/sched.c.

#define cpu_rq ( cpu   )     (&per_cpu(runqueues, (cpu)))

Definition at line 683 of file kernel/sched.c.

#define DEF_TIMESLICE   (100 * HZ / 1000)

Definition at line 131 of file kernel/sched.c.

#define finish_arch_switch ( prev   )     do { } while (0)

Definition at line 885 of file kernel/sched.c.

#define for_each_class ( class   )     for (class = sched_class_highest; class; class = class->next)

Definition at line 1683 of file kernel/sched.c.

#define for_each_domain ( cpu,
__sd   )     for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

Definition at line 680 of file kernel/sched.c.

#define MAX_USER_PRIO   (USER_PRIO(MAX_PRIO))

Definition at line 115 of file kernel/sched.c.

#define NICE_0_LOAD   SCHED_LOAD_SCALE

Definition at line 122 of file kernel/sched.c.

#define NICE_0_SHIFT   SCHED_LOAD_SHIFT

Definition at line 123 of file kernel/sched.c.

#define NICE_TO_PRIO ( nice   )     (MAX_RT_PRIO + (nice) + 20)

Definition at line 104 of file kernel/sched.c.

#define NS_TO_JIFFIES ( TIME   )     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

Definition at line 120 of file kernel/sched.c.

#define prepare_arch_switch ( next   )     do { } while (0)

Definition at line 882 of file kernel/sched.c.

#define PRIO_TO_NICE ( prio   )     ((prio) - MAX_RT_PRIO - 20)

Definition at line 105 of file kernel/sched.c.

#define RUNTIME_INF   ((u64)~0ULL)

Definition at line 136 of file kernel/sched.c.

#define sched_class_highest   (&rt_sched_class)

Definition at line 1682 of file kernel/sched.c.

#define sched_feat ( x   )     (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

Definition at line 833 of file kernel/sched.c.

#define SCHED_FEAT ( name,
enabled   )     (1UL << __SCHED_FEAT_##name) * enabled |

Definition at line 733 of file kernel/sched.c.

#define SCHED_FEAT ( name,
enabled   )     __SCHED_FEAT_##name ,

Definition at line 733 of file kernel/sched.c.

#define SRR ( x,
y   )     (((x) + (1UL << ((y) - 1))) >> (y))

Definition at line 1290 of file kernel/sched.c.

#define TASK_NICE ( p   )     PRIO_TO_NICE((p)->static_prio)

Definition at line 106 of file kernel/sched.c.

#define task_rq ( p   )     cpu_rq(task_cpu(p))

Definition at line 685 of file kernel/sched.c.

#define TASK_USER_PRIO ( p   )     USER_PRIO((p)->static_prio)

Definition at line 114 of file kernel/sched.c.

 
#define this_rq (  )     (&__get_cpu_var(runqueues))

Definition at line 684 of file kernel/sched.c.

#define USER_PRIO ( p   )     ((p)-MAX_RT_PRIO)

Definition at line 113 of file kernel/sched.c.

#define WEIGHT_IDLEPRIO   3

Definition at line 1343 of file kernel/sched.c.

#define WMULT_CONST   (1UL << 32)

Definition at line 1282 of file kernel/sched.c.

#define WMULT_IDLEPRIO   1431655765

Definition at line 1344 of file kernel/sched.c.

#define WMULT_SHIFT   32

Definition at line 1285 of file kernel/sched.c.


Enumeration Type Documentation

anonymous enum

Definition at line 727 of file kernel/sched.c.


Function Documentation

unsigned long long __attribute__ ( (weak)   )  [read]

DDE only uses small parts of this.

pcibios_set_pcie_reset_state - set reset state for device dev. dev: the PCI-E device reset. state: Reset state to enter into

Definition at line 94 of file kernel/sched.c.

static void __cond_resched ( void   )  [static]

Definition at line 5737 of file kernel/sched.c.

static int __normal_prio ( struct task_struct *  p  )  [inline, static]

Definition at line 1746 of file kernel/sched.c.

static void __sched_fork ( struct task_struct *  p  )  [static]

Definition at line 2401 of file kernel/sched.c.

static int __sched_setscheduler ( struct task_struct *  p,
int  policy,
struct sched_param *  param,
bool  user 
) [static]

Definition at line 5281 of file kernel/sched.c.

static noinline void __schedule_bug ( struct task_struct *  prev  )  [static]

Definition at line 4488 of file kernel/sched.c.

static void __set_task_cpu ( struct task_struct *  p,
unsigned int  cpu 
) [inline, static]

Definition at line 1822 of file kernel/sched.c.

static void __setscheduler ( struct rq *  rq,
struct task_struct *  p,
int  policy,
int  prio 
) [static]

Definition at line 5240 of file kernel/sched.c.

static struct rq* __task_rq_lock ( struct task_struct *  p  )  [static, read]

Definition at line 967 of file kernel/sched.c.

static void __task_rq_unlock ( struct rq *  rq  )  [static]

Definition at line 1007 of file kernel/sched.c.

void __wake_up ( wait_queue_head_t *  q,
unsigned int  mode,
int  nr_exclusive,
void *  key 
)

__wake_up - wake up threads blocked on a waitqueue.

q: the waitqueue. mode: which threads. nr_exclusive: how many wake-one or wake-many threads to wake up. key: is directly passed to the wakeup function

Definition at line 4736 of file kernel/sched.c.
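
A minimal usage sketch (my_wq, my_event_ready and the two helpers are hypothetical, not part of sched.c); in kernels of this vintage wake_up(&my_wq) expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq);
    static int my_event_ready;

    static void my_producer(void)
    {
            my_event_ready = 1;
            __wake_up(&my_wq, TASK_NORMAL, 1, NULL);  /* same as wake_up(&my_wq) */
    }

    static int my_consumer(void)
    {
            /* sleeps until my_event_ready is set and a wakeup arrives */
            return wait_event_interruptible(my_wq, my_event_ready);
    }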

void __wake_up_common ( wait_queue_head_t *  q,
unsigned int  mode,
int  nr_exclusive,
int  sync,
void *  key 
)

Definition at line 4715 of file kernel/sched.c.

void __wake_up_locked ( wait_queue_head_t *  q,
unsigned int  mode 
)

Definition at line 4750 of file kernel/sched.c.

void __wake_up_sync ( wait_queue_head_t *  q,
unsigned int  mode,
int  nr_exclusive 
)

__wake_up_sync - wake up threads blocked on a waitqueue.

q: the waitqueue. mode: which threads. nr_exclusive: how many wake-one or wake-many threads to wake up

The sync wakeup differs in that the waker knows that it will schedule away soon, so while the target thread will be woken up, it will not be migrated to another CPU - ie. the two threads are 'synchronized' with each other. This can prevent needless bouncing between CPUs.

On UP it can prevent extra preemption.

Definition at line 4769 of file kernel/sched.c.

int __sched _cond_resched ( void   ) 

Definition at line 5754 of file kernel/sched.c.

static void account_guest_time ( struct task_struct *  p,
cputime_t  cputime,
cputime_t  cputime_scaled 
) [static]

Definition at line 4214 of file kernel/sched.c.

void account_idle_ticks ( unsigned long  ticks  ) 

Definition at line 4333 of file kernel/sched.c.

void account_idle_time ( cputime_t  cputime  ) 

Definition at line 4285 of file kernel/sched.c.

void account_process_tick ( struct task_struct *  p,
int  user_tick 
)

Definition at line 4304 of file kernel/sched.c.

void account_steal_ticks ( unsigned long  ticks  ) 

Definition at line 4324 of file kernel/sched.c.

void account_steal_time ( cputime_t  cputime  ) 

Definition at line 4273 of file kernel/sched.c.

void account_system_time ( struct task_struct *  p,
int  hardirq_offset,
cputime_t  cputime,
cputime_t  cputime_scaled 
)

Definition at line 4240 of file kernel/sched.c.

void account_user_time ( struct task_struct *  p,
cputime_t  cputime,
cputime_t  cputime_scaled 
)

Definition at line 4187 of file kernel/sched.c.

static void activate_task ( struct rq *  rq,
struct task_struct *  p,
int  wakeup 
) [static]

Definition at line 1792 of file kernel/sched.c.

static int alloc_fair_sched_group ( struct task_group *  tg,
struct task_group *  parent 
) [inline, static]

Definition at line 8747 of file kernel/sched.c.

static int alloc_rt_sched_group ( struct task_group *  tg,
struct task_group *  parent 
) [inline, static]

Definition at line 8835 of file kernel/sched.c.

static unsigned long calc_delta_mine ( unsigned long  delta_exec,
unsigned long  weight,
struct load_weight *  lw 
) [static]

Definition at line 1296 of file kernel/sched.c.

int can_nice ( const struct task_struct *  p,
const int  nice 
)

Definition at line 5137 of file kernel/sched.c.

static void check_class_changed ( struct rq *  rq,
struct task_struct *  p,
const struct sched_class *  prev_class,
int  oldprio,
int  running 
) [inline, static]

Definition at line 1836 of file kernel/sched.c.

static void check_preempt_curr ( struct rq *  rq,
struct task_struct *  p,
int  sync 
) [inline, static]

Definition at line 659 of file kernel/sched.c.

static bool check_same_owner ( struct task_struct *  p  )  [static]

Definition at line 5268 of file kernel/sched.c.

void complete ( struct completion *  x  ) 

complete: - signals a single thread waiting on this completion. x: holds the state of this particular completion

This will wake up a single thread waiting on this completion. Threads will be awakened in the same order in which they were queued.

See also complete_all(), wait_for_completion() and related routines.

Definition at line 4795 of file kernel/sched.c.
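
A minimal sketch of the usual pairing with wait_for_completion(); my_done, my_worker and my_caller are hypothetical names:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(my_done);

    static int my_worker(void *unused)
    {
            /* ... perform the work ... */
            complete(&my_done);             /* wakes exactly one waiter, in FIFO order */
            return 0;
    }

    static void my_caller(void)
    {
            /* blocks, uninterruptibly, until my_worker() signals my_done */
            wait_for_completion(&my_done);
    }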

void complete_all ( struct completion *  x  ) 

complete_all: - signals all threads waiting on this completion. x: holds the state of this particular completion

This will wake up all threads waiting on this particular completion event.

Definition at line 4812 of file kernel/sched.c.

bool completion_done ( struct completion *  x  ) 

completion_done - Test to see if a completion has any waiters. x: completion structure

Returns: 0 if there are waiters (wait_for_completion() in progress) 1 if there are no waiters.

Definition at line 4974 of file kernel/sched.c.

int cond_resched_lock ( spinlock_t *  lock  ) 

Definition at line 5773 of file kernel/sched.c.

int __sched cond_resched_softirq ( void   ) 

Definition at line 5791 of file kernel/sched.c.

static void context_switch ( struct rq *  rq,
struct task_struct *  prev,
struct task_struct *  next 
) [inline, static]

Definition at line 2668 of file kernel/sched.c.

static int cpu_of ( struct rq *  rq  )  [inline, static]

Definition at line 664 of file kernel/sched.c.

static void cpuacct_charge ( struct task_struct *  tsk,
u64  cputime 
) [inline, static]

Definition at line 1416 of file kernel/sched.c.

static void deactivate_task ( struct rq *  rq,
struct task_struct *  p,
int  sleep 
) [static]

Definition at line 1804 of file kernel/sched.c.

static void dec_cpu_load ( struct rq *  rq,
unsigned long  load 
) [inline, static]

Definition at line 1424 of file kernel/sched.c.

static void dec_nr_running ( struct rq *  rq  )  [static]

Definition at line 1691 of file kernel/sched.c.

int default_wake_function ( wait_queue_t *  curr,
unsigned  mode,
int  sync,
void *  key 
)

Definition at line 4699 of file kernel/sched.c.

static DEFINE_MUTEX ( sched_domains_mutex   )  [static]

DEFINE_PER_CPU ( struct  kernel_stat,
kstat   
)

static DEFINE_PER_CPU_SHARED_ALIGNED ( struct  rq,
runqueues   
) [static]

DEFINE_TRACE ( sched_migrate_task   ) 

DEFINE_TRACE ( sched_switch   ) 

DEFINE_TRACE ( sched_wakeup_new   ) 

DEFINE_TRACE ( sched_wakeup   ) 

DEFINE_TRACE ( sched_wait_task   ) 

static void dequeue_task ( struct rq *  rq,
struct task_struct *  p,
int  sleep 
) [static]

Definition at line 1730 of file kernel/sched.c.

static int do_sched_rt_period_timer ( struct rt_bandwidth *  rt_b,
int  overrun 
) [static]

static int do_sched_setscheduler ( pid_t  pid,
int  policy,
struct sched_param __user *  param 
) [static]

Definition at line 5443 of file kernel/sched.c.

static long __sched do_wait_for_common ( struct completion *  x,
long  timeout,
int  state 
) [inline, static]

Definition at line 4824 of file kernel/sched.c.

static int effective_prio ( struct task_struct *  p  )  [static]

Definition at line 1776 of file kernel/sched.c.

static void enqueue_task ( struct rq *  rq,
struct task_struct *  p,
int  wakeup 
) [static]

Definition at line 1723 of file kernel/sched.c.

EXPORT_PER_CPU_SYMBOL ( kstat   ) 

EXPORT_SYMBOL ( io_schedule   ) 

EXPORT_SYMBOL ( yield   ) 

EXPORT_SYMBOL ( cond_resched_softirq   ) 

EXPORT_SYMBOL ( cond_resched_lock   ) 

EXPORT_SYMBOL ( _cond_resched   ) 

EXPORT_SYMBOL ( task_nice   ) 

EXPORT_SYMBOL ( set_user_nice   ) 

EXPORT_SYMBOL ( sleep_on_timeout   ) 

EXPORT_SYMBOL ( sleep_on   ) 

EXPORT_SYMBOL ( interruptible_sleep_on_timeout   ) 

EXPORT_SYMBOL ( interruptible_sleep_on   ) 

EXPORT_SYMBOL ( completion_done   ) 

EXPORT_SYMBOL ( try_wait_for_completion   ) 

EXPORT_SYMBOL ( wait_for_completion_killable   ) 

EXPORT_SYMBOL ( wait_for_completion_interruptible_timeout   ) 

EXPORT_SYMBOL ( wait_for_completion_interruptible   ) 

EXPORT_SYMBOL ( wait_for_completion_timeout   ) 

EXPORT_SYMBOL ( wait_for_completion   ) 

EXPORT_SYMBOL ( complete_all   ) 

EXPORT_SYMBOL ( complete   ) 

EXPORT_SYMBOL ( __wake_up   ) 

EXPORT_SYMBOL ( default_wake_function   ) 

EXPORT_SYMBOL ( schedule   ) 

EXPORT_SYMBOL ( wake_up_process   ) 

EXPORT_SYMBOL_GPL ( sched_setscheduler   ) 

EXPORT_SYMBOL_GPL ( __wake_up_sync   ) 

static struct task_struct* find_process_by_pid ( pid_t  pid  )  [static, read]

find_process_by_pid - find a process with a matching PID value.

pid: the pid in question.

Definition at line 5233 of file kernel/sched.c.

static void finish_lock_switch ( struct rq *  rq,
struct task_struct *  prev 
) [inline, static]

Definition at line 903 of file kernel/sched.c.

static void finish_task_switch ( struct rq *  rq,
struct task_struct *  prev 
) [static]

finish_task_switch - clean up after a task-switch. rq: runqueue associated with task-switch. prev: the thread we just switched away from.

finish_task_switch must be called after the context switch, paired with a prepare_task_switch call before the context switch. finish_task_switch will reconcile locking set up by prepare_task_switch, and do any other architecture-specific cleanup actions.

Note that we may have delayed dropping an mm in context_switch(). If so, we finish that here outside of the runqueue lock. (Doing it with the lock held can cause deadlocks; see schedule() for details.)

Definition at line 2605 of file kernel/sched.c.

static void fire_sched_in_preempt_notifiers ( struct task_struct *  curr  )  [static]

Definition at line 2556 of file kernel/sched.c.

static void fire_sched_out_preempt_notifiers ( struct task_struct *  curr,
struct task_struct *  next 
) [static]

Definition at line 2561 of file kernel/sched.c.

static void free_fair_sched_group ( struct task_group *  tg  )  [inline, static]

Definition at line 8742 of file kernel/sched.c.

static void free_rt_sched_group ( struct task_group *  tg  )  [inline, static]

Definition at line 8830 of file kernel/sched.c.

static int get_user_cpu_mask ( unsigned long __user *  user_mask_ptr,
unsigned  len,
struct cpumask *  new_mask 
) [static]

Definition at line 5620 of file kernel/sched.c.

static u64 global_rt_period ( void   )  [inline, static]

Definition at line 868 of file kernel/sched.c.

static u64 global_rt_runtime ( void   )  [inline, static]

Definition at line 873 of file kernel/sched.c.

static void hrtick_clear ( struct rq *  rq  )  [inline, static]

Definition at line 1171 of file kernel/sched.c.

static void idle_balance ( int  cpu,
struct rq *  rq 
) [inline, static]

Definition at line 4145 of file kernel/sched.c.

int idle_cpu ( int  cpu  ) 

idle_cpu - is a given cpu idle currently? cpu: the processor in question.

Definition at line 5215 of file kernel/sched.c.

struct task_struct* idle_task ( int  cpu  )  [read]

idle_task - return the idle task for a given cpu.

cpu: the processor in question.

Definition at line 5224 of file kernel/sched.c.

int in_sched_functions ( unsigned long  addr  ) 

Definition at line 8223 of file kernel/sched.c.

static void inc_cpu_load ( struct rq *  rq,
unsigned long  load 
) [inline, static]

Definition at line 1419 of file kernel/sched.c.

static void inc_nr_running ( struct rq *  rq  )  [static]

Definition at line 1686 of file kernel/sched.c.

static void init_cfs_rq ( struct cfs_rq *  cfs_rq,
struct rq *  rq 
) [static]

Definition at line 8230 of file kernel/sched.c.

static void init_hrtick ( void   )  [inline, static]

Definition at line 1179 of file kernel/sched.c.

void __cpuinit init_idle ( struct task_struct *  idle,
int  cpu 
)

init_idle - set up an idle thread for a given CPU. idle: task in question. cpu: cpu the idle task belongs to

NOTE: this function does not set the idle thread's NEED_RESCHED flag, to make booting more robust.

Definition at line 6039 of file kernel/sched.c.

void __cpuinit init_idle_bootup_task ( struct task_struct *  idle  ) 

Definition at line 6026 of file kernel/sched.c.

static void init_rq_hrtick ( struct rq *  rq  )  [inline, static]

Definition at line 1175 of file kernel/sched.c.

static void init_rt_bandwidth ( struct rt_bandwidth *  rt_b,
u64  period,
u64  runtime 
) [static]

Definition at line 222 of file kernel/sched.c.

static void init_rt_rq ( struct rt_rq *  rt_rq,
struct rq *  rq 
) [static]

Definition at line 8240 of file kernel/sched.c.

void __sched interruptible_sleep_on ( wait_queue_head_t *  q  ) 

Definition at line 5008 of file kernel/sched.c.

long __sched interruptible_sleep_on_timeout ( wait_queue_head_t *  q,
long  timeout 
)

Definition at line 5015 of file kernel/sched.c.

void __sched io_schedule ( void   ) 

Definition at line 5826 of file kernel/sched.c.

long __sched io_schedule_timeout ( long  timeout  ) 

Definition at line 5838 of file kernel/sched.c.

static int normal_prio ( struct task_struct *  p  )  [inline, static]

Definition at line 1758 of file kernel/sched.c.

unsigned long nr_active ( void   ) 

Definition at line 2772 of file kernel/sched.c.

unsigned long long nr_context_switches ( void   ) 

Definition at line 2751 of file kernel/sched.c.

unsigned long nr_iowait ( void   ) 

Definition at line 2762 of file kernel/sched.c.

unsigned long nr_running ( void   ) 

Definition at line 2724 of file kernel/sched.c.

unsigned long nr_uninterruptible ( void   ) 

Definition at line 2734 of file kernel/sched.c.

static struct task_struct* pick_next_task ( struct rq *  rq,
struct task_struct *  prev 
) [static, read]

Definition at line 4534 of file kernel/sched.c.

static void prepare_lock_switch ( struct rq *  rq,
struct task_struct *  next 
) [inline, static]

Definition at line 899 of file kernel/sched.c.

static void prepare_task_switch ( struct rq *  rq,
struct task_struct *  prev,
struct task_struct *  next 
) [inline, static]

prepare_task_switch - prepare to switch tasks. rq: the runqueue preparing to switch. prev: the current task that is being switched out. next: the task we are going to switch to.

This is called with the rq lock held and interrupts off. It must be paired with a subsequent finish_task_switch after the context switch.

prepare_task_switch sets up locking and calls architecture specific hooks.

Definition at line 2582 of file kernel/sched.c.

static void register_fair_sched_group ( struct task_group *  tg,
int  cpu 
) [inline, static]

Definition at line 8752 of file kernel/sched.c.

static void register_rt_sched_group ( struct task_group *  tg,
int  cpu 
) [inline, static]

Definition at line 8840 of file kernel/sched.c.

static void resched_task ( struct task_struct *  p  )  [static]

Definition at line 1272 of file kernel/sched.c.

static int rt_bandwidth_enabled ( void   )  [inline, static]

Definition at line 234 of file kernel/sched.c.

static int rt_policy ( int  policy  )  [inline, static]

Definition at line 168 of file kernel/sched.c.

int runqueue_is_locked ( void   ) 

runqueue_is_locked

Returns true if the current cpu runqueue is locked. This interface allows printk to be called with the runqueue lock held and know whether or not it is OK to wake up the klogd.

Definition at line 709 of file kernel/sched.c.
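
A hedged sketch of the pattern described above; log_wait stands in for the klogd wait queue, and the real printk code is more involved:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(log_wait);

    static void maybe_wake_logger(void)
    {
            /*
             * If this CPU's runqueue lock is already held, waking a task here
             * could recurse into the scheduler, so defer the wakeup instead.
             */
            if (runqueue_is_locked())
                    return;
            wake_up_interruptible(&log_wait);
    }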

void sched_fork ( struct task_struct *  p,
int  clone_flags 
)

Definition at line 2441 of file kernel/sched.c.

long sched_getaffinity ( pid_t  pid,
struct cpumask *  mask 
)

Definition at line 5653 of file kernel/sched.c.

void __init sched_init ( void   ) 

Definition at line 8331 of file kernel/sched.c.

static void sched_init_granularity ( void   )  [inline, static]

Definition at line 6092 of file kernel/sched.c.

void __init sched_init_smp ( void   ) 

Definition at line 8217 of file kernel/sched.c.

static int sched_rt_global_constraints ( void   )  [static]

Definition at line 9271 of file kernel/sched.c.

int sched_rt_handler ( struct ctl_table *  table,
int  write,
struct file *  filp,
void __user *  buffer,
size_t *  lenp,
loff_t *  ppos 
)

Definition at line 9293 of file kernel/sched.c.

static enum hrtimer_restart sched_rt_period_timer ( struct hrtimer *  timer  )  [static]

Definition at line 200 of file kernel/sched.c.

long sched_setaffinity ( pid_t  pid,
const struct cpumask *  in_mask 
)

Definition at line 5553 of file kernel/sched.c.

int sched_setscheduler ( struct task_struct *  p,
int  policy,
struct sched_param *  param 
)

sched_setscheduler - change the scheduling policy and/or RT priority of a thread.

p: the task in question. policy: new policy.

Parameters:
 param: structure containing the new RT priority.
NOTE that the task may be already dead.

Definition at line 5416 of file kernel/sched.c.
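
A kernel-side usage sketch; the helper name and the priority value 50 are illustrative only:

    #include <linux/sched.h>

    static int make_task_fifo(struct task_struct *task)
    {
            struct sched_param param = { .sched_priority = 50 };

            /* permission and limit checks run against the calling context */
            return sched_setscheduler(task, SCHED_FIFO, &param);
    }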

int sched_setscheduler_nocheck ( struct task_struct *  p,
int  policy,
struct sched_param *  param 
)

sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.

p: the task in question. policy: new policy.

Parameters:
 param: structure containing the new RT priority.
Just like sched_setscheduler, only don't bother checking if the current context has permission. For example, this is needed in stop_machine(): we create temporary high priority worker threads, but our caller might not have that capability.

Definition at line 5436 of file kernel/sched.c.

void sched_show_task ( struct task_struct *  p  ) 

Definition at line 5958 of file kernel/sched.c.

asmlinkage void __sched schedule ( void   ) 

Definition at line 4565 of file kernel/sched.c.

static void schedule_debug ( struct task_struct *  prev  )  [inline, static]

Definition at line 4509 of file kernel/sched.c.

asmlinkage void schedule_tail ( struct task_struct *  prev  ) 

schedule_tail - first thing a freshly forked thread must call.

prev: the thread we just switched away from.

Definition at line 2649 of file kernel/sched.c.

void scheduler_tick ( void   ) 

Definition at line 4406 of file kernel/sched.c.

static void set_load_weight ( struct task_struct *  p  )  [static]

Definition at line 1696 of file kernel/sched.c.

static void set_task_rq ( struct task_struct *  p,
unsigned int  cpu 
) [inline, static]

Definition at line 411 of file kernel/sched.c.

void set_user_nice ( struct task_struct *  p,
long  nice 
)

Definition at line 5084 of file kernel/sched.c.

void show_state_filter ( unsigned long  state_filter  ) 

Definition at line 5991 of file kernel/sched.c.

void __sched sleep_on ( wait_queue_head_t *  q  ) 

Definition at line 5021 of file kernel/sched.c.

static long __sched sleep_on_common ( wait_queue_head_t *  q,
int  state,
long  timeout 
) [static]

Definition at line 4987 of file kernel/sched.c.

long __sched sleep_on_timeout ( wait_queue_head_t *  q,
long  timeout 
)

Definition at line 5027 of file kernel/sched.c.

static void start_rt_bandwidth ( struct rt_bandwidth *  rt_b  )  [static]

Definition at line 239 of file kernel/sched.c.

SYSCALL_DEFINE0 ( sched_yield   ) 

sys_sched_yield - yield the current processor to other threads.

This function yields the current CPU to other tasks. If there are no other threads running on this CPU then this function will return.

Definition at line 5715 of file kernel/sched.c.
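
From user space this syscall is normally reached through the sched_yield() wrapper; a minimal sketch (flag_set_elsewhere is hypothetical, set by another thread):

    #include <sched.h>

    extern volatile int flag_set_elsewhere;

    void wait_for_flag(void)
    {
            while (!flag_set_elsewhere)
                    sched_yield();          /* enters sys_sched_yield() */
    }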

SYSCALL_DEFINE1 ( sched_get_priority_min  ,
int  ,
policy   
)

sys_sched_get_priority_min - return minimum RT priority.

policy: scheduling class.

this syscall returns the minimum rt_priority that can be used by a given scheduling class.

Definition at line 5883 of file kernel/sched.c.

SYSCALL_DEFINE1 ( sched_get_priority_max  ,
int  ,
policy   
)

sys_sched_get_priority_max - return maximum RT priority.

policy: scheduling class.

this syscall returns the maximum rt_priority that can be used by a given scheduling class.

Definition at line 5858 of file kernel/sched.c.
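
A user-space sketch querying the SCHED_FIFO range through the glibc wrappers (typically 1..99 on Linux, but the values should be queried rather than hard-coded):

    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            int min = sched_get_priority_min(SCHED_FIFO);
            int max = sched_get_priority_max(SCHED_FIFO);

            if (min < 0 || max < 0)
                    return 1;
            printf("SCHED_FIFO priority range: %d..%d\n", min, max);
            return 0;
    }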

SYSCALL_DEFINE1 ( sched_getscheduler  ,
pid_t  ,
pid   
)

sys_sched_getscheduler - get the policy (scheduling class) of a thread. pid: the pid in question.

Definition at line 5494 of file kernel/sched.c.

SYSCALL_DEFINE2 ( sched_rr_get_interval  ,
pid_t  ,
pid  ,
struct timespec __user *  ,
interval   
)

sys_sched_rr_get_interval - return the default timeslice of a process.

pid: pid of the process. interval: userspace pointer to the timeslice value.

this syscall writes the default timeslice value of a given process into the user-space timespec buffer. A value of '0' means infinity.

Definition at line 5908 of file kernel/sched.c.
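
A user-space sketch using the glibc wrapper; pid 0 means the calling process:

    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            if (sched_rr_get_interval(0, &ts) == 0)
                    printf("timeslice: %ld.%09ld s\n",
                           (long)ts.tv_sec, (long)ts.tv_nsec);
            return 0;
    }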

SYSCALL_DEFINE2 ( sched_getparam  ,
pid_t  ,
pid  ,
struct sched_param __user *  ,
param   
)

sys_sched_getparam - get the RT priority of a thread. pid: the pid in question.

Parameters:
 param: structure containing the RT priority.

Definition at line 5519 of file kernel/sched.c.

SYSCALL_DEFINE2 ( sched_setparam  ,
pid_t  ,
pid  ,
struct sched_param __user *  ,
param   
)

sys_sched_setparam - set/change the RT priority of a thread. pid: the pid in question.

Parameters:
 param: structure containing the new RT priority.

Definition at line 5485 of file kernel/sched.c.

SYSCALL_DEFINE3 ( sched_getaffinity  ,
pid_t  ,
pid  ,
unsigned  int,
len  ,
unsigned long __user *  ,
user_mask_ptr   
)

sys_sched_getaffinity - get the cpu affinity of a process. pid: pid of the process. len: length in bytes of the bitmask pointed to by user_mask_ptr. user_mask_ptr: user-space pointer to hold the current cpu mask

Definition at line 5685 of file kernel/sched.c.

SYSCALL_DEFINE3 ( sched_setaffinity  ,
pid_t  ,
pid  ,
unsigned  int,
len  ,
unsigned long __user *  ,
user_mask_ptr   
)

sys_sched_setaffinity - set the cpu affinity of a process. pid: pid of the process. len: length in bytes of the bitmask pointed to by user_mask_ptr. user_mask_ptr: user-space pointer to the new cpu mask

Definition at line 5637 of file kernel/sched.c.

SYSCALL_DEFINE3 ( sched_setscheduler  ,
pid_t  ,
pid  ,
int  ,
policy  ,
struct sched_param __user *  ,
param   
)

sys_sched_setscheduler - set/change the scheduler policy and RT priority. pid: the pid in question.

policy: new policy.

Parameters:
 param: structure containing the new RT priority.

Definition at line 5470 of file kernel/sched.c.

int task_curr ( const struct task_struct *  p  )  [inline]

task_curr - is this task currently executing on a CPU? p: the task in question.

Definition at line 1817 of file kernel/sched.c.

static int task_current ( struct rq *  rq,
struct task_struct *  p 
) [inline, static]

Definition at line 888 of file kernel/sched.c.

unsigned long long task_delta_exec ( struct task_struct *  p  ) 

Definition at line 4159 of file kernel/sched.c.

static struct task_group* task_group ( struct task_struct *  p  )  [static, read]

Definition at line 412 of file kernel/sched.c.

cputime_t task_gtime ( struct task_struct *  p  )  [inline]

Definition at line 4394 of file kernel/sched.c.

static int task_has_rt_policy ( struct task_struct *  p  )  [inline, static]

Definition at line 175 of file kernel/sched.c.

int task_nice ( const struct task_struct *  p  ) 

task_nice - return the nice value of a given task.

p: the task in question.

Definition at line 5205 of file kernel/sched.c.

int task_prio ( const struct task_struct *  p  ) 

task_prio - return the priority value of a given task.

p: the task in question.

This is the priority value as seen by users in /proc. RT tasks are offset by -200. Normal tasks are centered around 0, value goes from -16 to +15.

Definition at line 5196 of file kernel/sched.c.
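
A standalone illustration of the nice/priority mapping implied by the macros above, assuming the usual MAX_RT_PRIO value of 100 (an assumption; the value is not stated on this page):

    #include <stdio.h>

    #define MAX_RT_PRIO        100                          /* assumed */
    #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
    #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
    #define USER_PRIO(p)       ((p) - MAX_RT_PRIO)

    int main(void)
    {
            printf("nice   0 -> prio %d -> user prio %d\n",
                   NICE_TO_PRIO(0), USER_PRIO(NICE_TO_PRIO(0)));   /* 120, 20 */
            printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20));    /* 100 */
            printf("nice  19 -> prio %d\n", NICE_TO_PRIO(19));     /* 139 */
            printf("prio 120 -> nice %d\n", PRIO_TO_NICE(120));    /* 0 */
            return 0;
    }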

static struct rq* task_rq_lock ( struct task_struct *  p,
unsigned long *  flags 
) [static, read]

Definition at line 984 of file kernel/sched.c.

static void task_rq_unlock ( struct rq *  rq,
unsigned long *  flags 
) [inline, static]

Definition at line 1013 of file kernel/sched.c.

void task_rq_unlock_wait ( struct task_struct *  p  ) 

Definition at line 999 of file kernel/sched.c.

static int task_running ( struct rq *  rq,
struct task_struct *  p 
) [inline, static]

Definition at line 894 of file kernel/sched.c.

cputime_t task_stime ( struct task_struct *  p  ) 

Definition at line 4375 of file kernel/sched.c.

cputime_t task_utime ( struct task_struct *  p  ) 

Definition at line 4354 of file kernel/sched.c.

static struct rq* this_rq_lock ( void   )  [static, read]

Definition at line 1022 of file kernel/sched.c.

static int try_to_wake_up ( struct task_struct *  p,
unsigned int  state,
int  sync 
) [static]

Definition at line 2276 of file kernel/sched.c.

bool try_wait_for_completion ( struct completion *  x  ) 

try_wait_for_completion - try to decrement a completion without blocking. x: completion structure

Returns: 0 if a decrement cannot be done without blocking 1 if a decrement succeeded.

If a completion is being used as a counting completion, attempt to decrement the counter without blocking. This enables us to avoid waiting if the resource the completion is protecting is not available.

Definition at line 4952 of file kernel/sched.c.
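
A sketch of the counting-completion use mentioned above; my_tokens and the helper names are hypothetical:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(my_tokens);

    static void my_release_token(void)
    {
            complete(&my_tokens);                      /* done count++ */
    }

    static bool my_try_take_token(void)
    {
            /* consumes one token if available, never sleeps */
            return try_wait_for_completion(&my_tokens);
    }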

static void unregister_fair_sched_group ( struct task_group *  tg,
int  cpu 
) [inline, static]

Definition at line 8756 of file kernel/sched.c.

static void unregister_rt_sched_group ( struct task_group *  tg,
int  cpu 
) [inline, static]

Definition at line 8844 of file kernel/sched.c.

static void update_avg ( u64 *  avg,
u64  sample 
) [static]

Definition at line 1717 of file kernel/sched.c.

static void update_cpu_load ( struct rq *  this_rq  )  [static]

Definition at line 2791 of file kernel/sched.c.

static void update_load_add ( struct load_weight *  lw,
unsigned long  inc 
) [inline, static]

Definition at line 1322 of file kernel/sched.c.

static void update_load_sub ( struct load_weight *  lw,
unsigned long  dec 
) [inline, static]

Definition at line 1328 of file kernel/sched.c.

static void update_rq_clock ( struct rq *  rq  )  [inline, static]

Definition at line 688 of file kernel/sched.c.

static long __sched wait_for_common ( struct completion *  x,
long  timeout,
int  state 
) [static]

Definition at line 4850 of file kernel/sched.c.

void __sched wait_for_completion ( struct completion *  x  ) 

wait_for_completion: - waits for completion of a task. x: holds the state of this particular completion

This waits to be signaled for completion of a specific task. It is NOT interruptible and there is no timeout.

See also similar routines (i.e. wait_for_completion_timeout()) with timeout and interrupt capability. Also see complete().

Definition at line 4870 of file kernel/sched.c.

int __sched wait_for_completion_interruptible ( struct completion *  x  ) 

wait_for_completion_interruptible: - waits for completion of a task (w/intr). x: holds the state of this particular completion

This waits for completion of a specific task to be signaled. It is interruptible.

Definition at line 4899 of file kernel/sched.c.

unsigned long __sched wait_for_completion_interruptible_timeout ( struct completion *  x,
unsigned long  timeout 
)

wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)). x: holds the state of this particular completion. timeout: timeout value in jiffies

This waits for either a completion of a specific task to be signaled or for a specified timeout to expire. It is interruptible. The timeout is in jiffies.

Definition at line 4917 of file kernel/sched.c.

int __sched wait_for_completion_killable ( struct completion *  x  ) 

wait_for_completion_killable: - waits for completion of a task (killable). x: holds the state of this particular completion

This waits to be signaled for completion of a specific task. It can be interrupted by a kill signal.

Definition at line 4931 of file kernel/sched.c.

unsigned long __sched wait_for_completion_timeout ( struct completion *  x,
unsigned long  timeout 
)

wait_for_completion_timeout: - waits for completion of a task (w/timeout). x: holds the state of this particular completion. timeout: timeout value in jiffies

This waits for either a completion of a specific task to be signaled or for a specified timeout to expire. The timeout is in jiffies. It is not interruptible.

Definition at line 4886 of file kernel/sched.c.
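
A sketch of a bounded wait on a hypothetical hardware event; hw_done, the one-second bound and the helper name are illustrative:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(hw_done);

    static int my_wait_for_hw(void)
    {
            unsigned long left;

            left = wait_for_completion_timeout(&hw_done, msecs_to_jiffies(1000));
            return left ? 0 : -ETIMEDOUT;   /* a return of 0 means the timeout expired */
    }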

void wake_up_new_task ( struct task_struct *  p,
unsigned long  clone_flags 
)

Definition at line 2480 of file kernel/sched.c.

int wake_up_process ( struct task_struct *  p  ) 

Definition at line 2383 of file kernel/sched.c.

int wake_up_state ( struct task_struct *  p,
unsigned int  state 
)

Definition at line 2389 of file kernel/sched.c.

void __sched yield ( void   ) 

yield - yield the current processor to other threads.

This is a shortcut for kernel-space yielding - it marks the thread runnable and calls sys_sched_yield().

Definition at line 5812 of file kernel/sched.c.


Variable Documentation

struct rt_bandwidth def_rt_bandwidth [static]

Definition at line 196 of file kernel/sched.c.

cpumask_var_t nohz_cpu_mask

Definition at line 6080 of file kernel/sched.c.

const int prio_to_weight[40] [static]

Initial value:

 {
      88761,     71755,     56483,     46273,     36291,
      29154,     23254,     18705,     14949,     11916,
       9548,      7620,      6100,      4904,      3906,
       3121,      2501,      1991,      1586,      1277,
       1024,       820,       655,       526,       423,
        335,       272,       215,       172,       137,
        110,        87,        70,        56,        45,
         36,        29,        23,        18,        15,
}

Definition at line 1358 of file kernel/sched.c.

const u32 prio_to_wmult[40] [static]

Initial value:

 {
      48388,     59856,     76040,     92818,    118348,
     147320,    184698,    229616,    287308,    360437,
     449829,    563644,    704093,    875809,   1099582,
    1376151,   1717300,   2157191,   2708050,   3363326,
    4194304,   5237765,   6557202,   8165337,  10153587,
   12820798,  15790321,  19976592,  24970740,  31350126,
   39045157,  49367440,  61356676,  76695844,  95443717,
  119304647, 148102320, 186737708, 238609294, 286331153,
}

Definition at line 1376 of file kernel/sched.c.

__read_mostly int scheduler_running [static]

Definition at line 860 of file kernel/sched.c.

const char stat_nam[] = TASK_STATE_TO_CHAR_STR [static]

Definition at line 5956 of file kernel/sched.c.

const_debug unsigned int sysctl_sched_features

Initial value:


        0

Definition at line 736 of file kernel/sched.c.

const_debug unsigned int sysctl_sched_nr_migrate = 32

Definition at line 839 of file kernel/sched.c.

unsigned int sysctl_sched_rt_period = 1000000

Definition at line 858 of file kernel/sched.c.

int sysctl_sched_rt_runtime = 950000

Definition at line 866 of file kernel/sched.c.

unsigned int sysctl_sched_shares_ratelimit = 250000

Definition at line 845 of file kernel/sched.c.

unsigned int sysctl_sched_shares_thresh = 4

Definition at line 852 of file kernel/sched.c.


Generated on Wed Apr 11 06:39:16 2012 for DDE - The L4 Device Driver Environment by  doxygen 1.5.6